xref: /linux/drivers/firmware/efi/efi.c (revision c25f2fb1f469deaed2df8db524d91f3321a0f816)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * efi.c - EFI subsystem
4  *
5  * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
6  * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
7  * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
8  *
9  * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10  * allowing the efivarfs to be mounted or the efivars module to be loaded.
11  * The existence of /sys/firmware/efi may also be used by userspace to
12  * determine that the system supports EFI.
13  */
14 
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 
17 #include <linux/kobject.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/debugfs.h>
21 #include <linux/device.h>
22 #include <linux/efi.h>
23 #include <linux/of.h>
24 #include <linux/initrd.h>
25 #include <linux/io.h>
26 #include <linux/kexec.h>
27 #include <linux/platform_device.h>
28 #include <linux/random.h>
29 #include <linux/reboot.h>
30 #include <linux/slab.h>
31 #include <linux/acpi.h>
32 #include <linux/ucs2_string.h>
33 #include <linux/memblock.h>
34 #include <linux/security.h>
35 #include <linux/notifier.h>
36 
37 #include <asm/early_ioremap.h>
38 
39 struct efi __read_mostly efi = {
40 	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
41 	.acpi			= EFI_INVALID_TABLE_ADDR,
42 	.acpi20			= EFI_INVALID_TABLE_ADDR,
43 	.smbios			= EFI_INVALID_TABLE_ADDR,
44 	.smbios3		= EFI_INVALID_TABLE_ADDR,
45 	.esrt			= EFI_INVALID_TABLE_ADDR,
46 	.tpm_log		= EFI_INVALID_TABLE_ADDR,
47 	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
48 	.ovmf_debug_log         = EFI_INVALID_TABLE_ADDR,
49 #ifdef CONFIG_LOAD_UEFI_KEYS
50 	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
51 #endif
52 #ifdef CONFIG_EFI_COCO_SECRET
53 	.coco_secret		= EFI_INVALID_TABLE_ADDR,
54 #endif
55 #ifdef CONFIG_UNACCEPTED_MEMORY
56 	.unaccepted		= EFI_INVALID_TABLE_ADDR,
57 #endif
58 };
59 EXPORT_SYMBOL(efi);
60 
61 unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
62 static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
63 static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
64 static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;
65 
66 extern unsigned long screen_info_table;
67 
68 struct mm_struct efi_mm = {
69 	.mm_mt			= MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
70 	.mm_users		= ATOMIC_INIT(2),
71 	.mm_count		= ATOMIC_INIT(1),
72 	.write_protect_seq      = SEQCNT_ZERO(efi_mm.write_protect_seq),
73 	MMAP_LOCK_INITIALIZER(efi_mm)
74 	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
75 	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
76 	.user_ns		= &init_user_ns,
77 #ifdef CONFIG_SCHED_MM_CID
78 	.mm_cid.lock		= __RAW_SPIN_LOCK_UNLOCKED(efi_mm.mm_cid.lock),
79 #endif
80 	.flexible_array		= MM_STRUCT_FLEXIBLE_ARRAY_INIT,
81 };
82 
83 struct workqueue_struct *efi_rts_wq;
84 
85 static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
86 static int __init setup_noefi(char *arg)
87 {
88 	disable_runtime = true;
89 	return 0;
90 }
91 early_param("noefi", setup_noefi);
92 
93 bool efi_runtime_disabled(void)
94 {
95 	return disable_runtime;
96 }
97 
98 bool __pure __efi_soft_reserve_enabled(void)
99 {
100 	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
101 }
102 
103 static int __init parse_efi_cmdline(char *str)
104 {
105 	if (!str) {
106 		pr_warn("need at least one option\n");
107 		return -EINVAL;
108 	}
109 
110 	if (parse_option_str(str, "debug"))
111 		set_bit(EFI_DBG, &efi.flags);
112 
113 	if (parse_option_str(str, "noruntime"))
114 		disable_runtime = true;
115 
116 	if (parse_option_str(str, "runtime"))
117 		disable_runtime = false;
118 
119 	if (parse_option_str(str, "nosoftreserve"))
120 		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
121 
122 	return 0;
123 }
124 early_param("efi", parse_efi_cmdline);
125 
126 struct kobject *efi_kobj;
127 
128 /*
129  * Let's not leave out the systab information that snuck into
130  * the efivars driver.
131  * Note: do not add more fields to the systab sysfs file, as that breaks
132  * the sysfs one-value-per-file rule!
133  */
134 static ssize_t systab_show(struct kobject *kobj,
135 			   struct kobj_attribute *attr, char *buf)
136 {
137 	char *str = buf;
138 
139 	if (!kobj || !buf)
140 		return -EINVAL;
141 
142 	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
143 		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
144 	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
145 		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
146 	/*
147 	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
148 	 * SMBIOS3 entry point shall be preferred, so we list it first to
149 	 * let applications stop parsing after the first match.
150 	 */
151 	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
152 		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
153 	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
154 		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
155 
156 	return str - buf;
157 }
158 
159 static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
160 
161 static ssize_t fw_platform_size_show(struct kobject *kobj,
162 				     struct kobj_attribute *attr, char *buf)
163 {
164 	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
165 }
166 
167 extern __weak struct kobj_attribute efi_attr_fw_vendor;
168 extern __weak struct kobj_attribute efi_attr_runtime;
169 extern __weak struct kobj_attribute efi_attr_config_table;
170 static struct kobj_attribute efi_attr_fw_platform_size =
171 	__ATTR_RO(fw_platform_size);
172 
173 static struct attribute *efi_subsys_attrs[] = {
174 	&efi_attr_systab.attr,
175 	&efi_attr_fw_platform_size.attr,
176 	&efi_attr_fw_vendor.attr,
177 	&efi_attr_runtime.attr,
178 	&efi_attr_config_table.attr,
179 	NULL,
180 };
181 
182 umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
183 				   int n)
184 {
185 	return attr->mode;
186 }
187 
188 static const struct attribute_group efi_subsys_attr_group = {
189 	.attrs = efi_subsys_attrs,
190 	.is_visible = efi_attr_is_visible,
191 };
192 
193 struct blocking_notifier_head efivar_ops_nh;
194 EXPORT_SYMBOL_GPL(efivar_ops_nh);
195 
196 static struct efivars generic_efivars;
197 static struct efivar_operations generic_ops;
198 
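/*
 * Probe the firmware's variable services with a single GetNextVariableName()
 * call: some implementations advertise the service but return EFI_UNSUPPORTED
 * at run time, in which case the generic efivar operations must not be
 * registered.
 */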
199 static bool generic_ops_supported(void)
200 {
201 	unsigned long name_size;
202 	efi_status_t status;
203 	efi_char16_t name;
204 	efi_guid_t guid;
205 
206 	name_size = sizeof(name);
207 
208 	if (!efi.get_next_variable)
209 		return false;
210 	status = efi.get_next_variable(&name_size, &name, &guid);
211 	if (status == EFI_UNSUPPORTED)
212 		return false;
213 
214 	return true;
215 }
216 
217 static int generic_ops_register(void)
218 {
219 	if (!generic_ops_supported())
220 		return 0;
221 
222 	generic_ops.get_variable = efi.get_variable;
223 	generic_ops.get_next_variable = efi.get_next_variable;
224 	generic_ops.query_variable_store = efi_query_variable_store;
225 	generic_ops.query_variable_info = efi.query_variable_info;
226 
227 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
228 		generic_ops.set_variable = efi.set_variable;
229 		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
230 	}
231 	return efivars_register(&generic_efivars, &generic_ops);
232 }
233 
234 static void generic_ops_unregister(void)
235 {
236 	if (!generic_ops.get_variable)
237 		return;
238 
239 	efivars_unregister(&generic_efivars);
240 }
241 
242 void efivars_generic_ops_register(void)
243 {
244 	generic_ops_register();
245 }
246 EXPORT_SYMBOL_GPL(efivars_generic_ops_register);
247 
248 void efivars_generic_ops_unregister(void)
249 {
250 	generic_ops_unregister();
251 }
252 EXPORT_SYMBOL_GPL(efivars_generic_ops_unregister);
253 
254 #ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
255 #define EFIVAR_SSDT_NAME_MAX	16UL
256 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
257 static int __init efivar_ssdt_setup(char *str)
258 {
259 	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
260 
261 	if (ret)
262 		return ret;
263 
264 	if (strlen(str) < sizeof(efivar_ssdt))
265 		memcpy(efivar_ssdt, str, strlen(str));
266 	else
267 		pr_warn("efivar_ssdt: name too long: %s\n", str);
268 	return 1;
269 }
270 __setup("efivar_ssdt=", efivar_ssdt_setup);
271 
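/*
 * Walk all EFI variables and load the payload of any variable whose name
 * matches the "efivar_ssdt=" parameter as an ACPI table override. Data that
 * acpi_load_table() accepts stays owned by the ACPI core and is not freed.
 */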
272 static __init int efivar_ssdt_load(void)
273 {
274 	unsigned long name_size = 256;
275 	efi_char16_t *name = NULL;
276 	efi_status_t status;
277 	efi_guid_t guid;
278 	int ret = 0;
279 
280 	if (!efivar_ssdt[0])
281 		return 0;
282 
283 	name = kzalloc(name_size, GFP_KERNEL);
284 	if (!name)
285 		return -ENOMEM;
286 
287 	for (;;) {
288 		char utf8_name[EFIVAR_SSDT_NAME_MAX];
289 		unsigned long data_size = 0;
290 		void *data;
291 		int limit;
292 
293 		status = efi.get_next_variable(&name_size, name, &guid);
294 		if (status == EFI_NOT_FOUND) {
295 			break;
296 		} else if (status == EFI_BUFFER_TOO_SMALL) {
297 			efi_char16_t *name_tmp =
298 				krealloc(name, name_size, GFP_KERNEL);
299 			if (!name_tmp) {
300 				ret = -ENOMEM;
301 				goto out;
302 			}
303 			name = name_tmp;
304 			continue;
305 		}
306 
307 		limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
308 		ucs2_as_utf8(utf8_name, name, limit - 1);
309 		if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
310 			continue;
311 
312 		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);
313 
314 		status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
315 		if (status != EFI_BUFFER_TOO_SMALL || !data_size) {
316 			ret = -EIO;
317 			goto out;
318 		}
319 
320 		data = kmalloc(data_size, GFP_KERNEL);
321 		if (!data) {
322 			ret = -ENOMEM;
323 			goto out;
324 		}
325 
326 		status = efi.get_variable(name, &guid, NULL, &data_size, data);
327 		if (status == EFI_SUCCESS) {
328 			acpi_status acpi_ret = acpi_load_table(data, NULL);
329 			if (ACPI_FAILURE(acpi_ret)) {
330 				pr_err("efivar_ssdt: failed to load table: %u\n",
331 				       acpi_ret);
332 			} else {
333 				/*
334 				 * The @data will be in use by ACPI engine,
335 				 * do not free it!
336 				 */
337 				continue;
338 			}
339 		} else {
340 			pr_err("efivar_ssdt: failed to get var data: 0x%lx\n", status);
341 		}
342 		kfree(data);
343 	}
344 out:
345 	kfree(name);
346 	return ret;
347 }
348 #else
349 static inline int efivar_ssdt_load(void) { return 0; }
350 #endif
351 
352 #ifdef CONFIG_DEBUG_FS
353 
354 #define EFI_DEBUGFS_MAX_BLOBS 32
355 
356 static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
357 
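/*
 * Expose the preserved EFI boot services code/data regions as read-only
 * blobs under <debugfs>/efi, capped at EFI_DEBUGFS_MAX_BLOBS entries.
 */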
358 static void __init efi_debugfs_init(void)
359 {
360 	struct dentry *efi_debugfs;
361 	efi_memory_desc_t *md;
362 	char name[32];
363 	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
364 	int i = 0;
365 
366 	efi_debugfs = debugfs_create_dir("efi", NULL);
367 	if (IS_ERR(efi_debugfs))
368 		return;
369 
370 	for_each_efi_memory_desc(md) {
371 		switch (md->type) {
372 		case EFI_BOOT_SERVICES_CODE:
373 			snprintf(name, sizeof(name), "boot_services_code%d",
374 				 type_count[md->type]++);
375 			break;
376 		case EFI_BOOT_SERVICES_DATA:
377 			snprintf(name, sizeof(name), "boot_services_data%d",
378 				 type_count[md->type]++);
379 			break;
380 		default:
381 			continue;
382 		}
383 
384 		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
385 			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
386 				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
387 			break;
388 		}
389 
390 		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
391 		debugfs_blob[i].data = memremap(md->phys_addr,
392 						debugfs_blob[i].size,
393 						MEMREMAP_WB);
394 		if (!debugfs_blob[i].data)
395 			continue;
396 
397 		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
398 		i++;
399 	}
400 }
401 #else
402 static inline void efi_debugfs_init(void) {}
403 #endif
404 
405 /*
406  * We register the efi subsystem with the firmware subsystem and the
407  * efivars subsystem with the efi subsystem, if the system was booted with
408  * EFI.
409  */
410 static int __init efisubsys_init(void)
411 {
412 	int error;
413 
414 	if (!efi_enabled(EFI_RUNTIME_SERVICES))
415 		efi.runtime_supported_mask = 0;
416 
417 	if (!efi_enabled(EFI_BOOT))
418 		return 0;
419 
420 	if (efi.runtime_supported_mask) {
421 		/*
422 		 * Since we process only one efi_runtime_service() at a time, an
423 		 * ordered workqueue (which creates only one execution context)
424 		 * should suffice for all our needs.
425 		 */
426 		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
427 		if (!efi_rts_wq) {
428 			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
429 			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
430 			efi.runtime_supported_mask = 0;
431 			return 0;
432 		}
433 	}
434 
435 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
436 		platform_device_register_simple("rtc-efi", 0, NULL, 0);
437 
438 	/* We register the efi directory at /sys/firmware/efi */
439 	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
440 	if (!efi_kobj) {
441 		pr_err("efi: Firmware registration failed.\n");
442 		error = -ENOMEM;
443 		goto err_destroy_wq;
444 	}
445 
446 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
447 				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
448 		error = generic_ops_register();
449 		if (error)
450 			goto err_put;
451 		error = efivar_ssdt_load();
452 		if (error)
453 			pr_err("efi: failed to load SSDT, error %d.\n", error);
454 		platform_device_register_simple("efivars", 0, NULL, 0);
455 	}
456 
457 	BLOCKING_INIT_NOTIFIER_HEAD(&efivar_ops_nh);
458 
459 	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
460 	if (error) {
461 		pr_err("efi: Sysfs attribute export failed with error %d.\n",
462 		       error);
463 		goto err_unregister;
464 	}
465 
466 	/* and the standard mountpoint for efivarfs */
467 	error = sysfs_create_mount_point(efi_kobj, "efivars");
468 	if (error) {
469 		pr_err("efivars: Subsystem registration failed.\n");
470 		goto err_remove_group;
471 	}
472 
473 	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
474 		efi_debugfs_init();
475 
476 #ifdef CONFIG_EFI_COCO_SECRET
477 	if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
478 		platform_device_register_simple("efi_secret", 0, NULL, 0);
479 #endif
480 
481 	if (IS_ENABLED(CONFIG_OVMF_DEBUG_LOG) &&
482 	    efi.ovmf_debug_log != EFI_INVALID_TABLE_ADDR)
483 		ovmf_log_probe(efi.ovmf_debug_log);
484 
485 	return 0;
486 
487 err_remove_group:
488 	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
489 err_unregister:
490 	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
491 				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
492 		generic_ops_unregister();
493 err_put:
494 	kobject_put(efi_kobj);
495 	efi_kobj = NULL;
496 err_destroy_wq:
497 	if (efi_rts_wq)
498 		destroy_workqueue(efi_rts_wq);
499 
500 	return error;
501 }
502 
503 subsys_initcall(efisubsys_init);
504 
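/*
 * Scan the EFI memory map for EFI_MEMORY_MORE_RELIABLE regions, mark them
 * as mirrored in memblock and log how much of the total memory is mirrored.
 */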
505 void __init efi_find_mirror(void)
506 {
507 	efi_memory_desc_t *md;
508 	u64 mirror_size = 0, total_size = 0;
509 
510 	if (!efi_enabled(EFI_MEMMAP))
511 		return;
512 
513 	for_each_efi_memory_desc(md) {
514 		unsigned long long start = md->phys_addr;
515 		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
516 
517 		total_size += size;
518 		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
519 			memblock_mark_mirror(start, size);
520 			mirror_size += size;
521 		}
522 	}
523 	if (mirror_size)
524 		pr_info("Memory: %lldM/%lldM mirrored memory\n",
525 			mirror_size>>20, total_size>>20);
526 }
527 
528 /*
529  * Find the efi memory descriptor for a given physical address.  Given a
530  * physical address, determine if it exists within an EFI Memory Map entry,
531  * and if so, populate the supplied memory descriptor with the appropriate
532  * data.
533  */
534 int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
535 {
536 	efi_memory_desc_t *md;
537 
538 	if (!efi_enabled(EFI_MEMMAP)) {
539 		pr_err_once("EFI_MEMMAP is not enabled.\n");
540 		return -EINVAL;
541 	}
542 
543 	if (!out_md) {
544 		pr_err_once("out_md is null.\n");
545 		return -EINVAL;
546 	}
547 
548 	for_each_efi_memory_desc(md) {
549 		u64 size;
550 		u64 end;
551 
552 		/* skip bogus entries (including empty ones) */
553 		if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
554 		    (md->num_pages <= 0) ||
555 		    (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
556 			continue;
557 
558 		size = md->num_pages << EFI_PAGE_SHIFT;
559 		end = md->phys_addr + size;
560 		if (phys_addr >= md->phys_addr && phys_addr < end) {
561 			memcpy(out_md, md, sizeof(*out_md));
562 			return 0;
563 		}
564 	}
565 	return -ENOENT;
566 }
567 
568 extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
569 	__weak __alias(__efi_mem_desc_lookup);
570 EXPORT_SYMBOL_GPL(efi_mem_desc_lookup);
571 
572 /*
573  * Calculate the highest address of an efi memory descriptor.
574  */
575 u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
576 {
577 	u64 size = md->num_pages << EFI_PAGE_SHIFT;
578 	u64 end = md->phys_addr + size;
579 	return end;
580 }
581 
582 void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
583 
584 /**
585  * efi_mem_reserve - Reserve an EFI memory region
586  * @addr: Physical address to reserve
587  * @size: Size of reservation
588  *
589  * Mark a region as reserved from general kernel allocation and
590  * prevent it being released by efi_free_boot_services().
591  *
592  * This function should be called by drivers once they've parsed EFI
593  * configuration tables to figure out where their data lives, e.g.
594  * efi_esrt_init().
595  */
596 void __init efi_mem_reserve(phys_addr_t addr, u64 size)
597 {
598 	/* efi_mem_reserve() does not work under Xen */
599 	if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
600 		return;
601 
602 	if (!memblock_is_region_reserved(addr, size))
603 		memblock_reserve(addr, size);
604 
605 	/*
606 	 * Some architectures (x86) reserve all boot services ranges
607 	 * until efi_free_boot_services() because of buggy firmware
608 	 * implementations. This means the above memblock_reserve() is
609 	 * superfluous on x86; what is really needed there is to ensure
610 	 * that the (@addr, @size) region is not freed.
611 	 */
612 	efi_arch_mem_reserve(addr, size);
613 }
614 
615 static const efi_config_table_type_t common_tables[] __initconst = {
616 	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
617 	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
618 	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
619 	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
620 	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
621 	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
622 	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
623 	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
624 	{EFI_TCG2_FINAL_EVENTS_TABLE_GUID,	&efi.tpm_final_log,	"TPMFinalLog"	},
625 	{EFI_CC_FINAL_EVENTS_TABLE_GUID,	&efi.tpm_final_log,	"CCFinalLog"	},
626 	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
627 	{LINUX_EFI_INITRD_MEDIA_GUID,		&initrd,		"INITRD"	},
628 	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
629 #ifdef CONFIG_OVMF_DEBUG_LOG
630 	{OVMF_MEMORY_LOG_TABLE_GUID,		&efi.ovmf_debug_log,	"OvmfDebugLog"	},
631 #endif
632 #ifdef CONFIG_EFI_RCI2_TABLE
633 	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys			},
634 #endif
635 #ifdef CONFIG_LOAD_UEFI_KEYS
636 	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID,	&efi.mokvar_table,	"MOKvar"	},
637 #endif
638 #ifdef CONFIG_EFI_COCO_SECRET
639 	{LINUX_EFI_COCO_SECRET_AREA_GUID,	&efi.coco_secret,	"CocoSecret"	},
640 #endif
641 #ifdef CONFIG_UNACCEPTED_MEMORY
642 	{LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID,	&efi.unaccepted,	"Unaccepted"	},
643 #endif
644 #ifdef CONFIG_EFI_GENERIC_STUB
645 	{LINUX_EFI_SCREEN_INFO_TABLE_GUID,	&screen_info_table			},
646 #endif
647 	{},
648 };
649 
650 static __init int match_config_table(const efi_guid_t *guid,
651 				     unsigned long table,
652 				     const efi_config_table_type_t *table_types)
653 {
654 	int i;
655 
656 	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
657 		if (efi_guidcmp(*guid, table_types[i].guid))
658 			continue;
659 
660 		if (!efi_config_table_is_usable(guid, table)) {
661 			if (table_types[i].name[0])
662 				pr_cont("(%s=0x%lx unusable) ",
663 					table_types[i].name, table);
664 			return 1;
665 		}
666 
667 		*(table_types[i].ptr) = table;
668 		if (table_types[i].name[0])
669 			pr_cont("%s=0x%lx ", table_types[i].name, table);
670 		return 1;
671 	}
672 
673 	return 0;
674 }
675 
676 /**
677  * reserve_unaccepted - Map and reserve unaccepted configuration table
678  * @unaccepted: Pointer to unaccepted memory table
679  *
680  * memblock_add() makes sure that the table is mapped in direct mapping. During
681  * normal boot it happens automatically because the table is allocated from
682  * usable memory. But during crashkernel boot only memory specifically reserved
683  * for crash scenario is mapped. memblock_add() forces the table to be mapped
684  * in crashkernel case.
685  *
686  * Align the range to the nearest page borders. Ranges smaller than page size
687  * are not going to be mapped.
688  *
689  * memblock_reserve() makes sure that future allocations will not touch the
690  * table.
691  */
692 
693 static __init void reserve_unaccepted(struct efi_unaccepted_memory *unaccepted)
694 {
695 	phys_addr_t start, size;
696 
697 	start = PAGE_ALIGN_DOWN(efi.unaccepted);
698 	size = PAGE_ALIGN(sizeof(*unaccepted) + unaccepted->size);
699 
700 	memblock_add(start, size);
701 	memblock_reserve(start, size);
702 }
703 
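/*
 * Parse the firmware's configuration table array: match each GUID against
 * the common table list (and the optional arch-specific one), then consume
 * the Linux-specific tables - RNG seed, MEMRESERVE list, RT properties,
 * initrd location and unaccepted memory - recorded there earlier in the boot.
 */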
704 int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
705 				   int count,
706 				   const efi_config_table_type_t *arch_tables)
707 {
708 	const efi_config_table_64_t *tbl64 = (void *)config_tables;
709 	const efi_config_table_32_t *tbl32 = (void *)config_tables;
710 	const efi_guid_t *guid;
711 	unsigned long table;
712 	int i;
713 
714 	pr_info("");
715 	for (i = 0; i < count; i++) {
716 		if (!IS_ENABLED(CONFIG_X86)) {
717 			guid = &config_tables[i].guid;
718 			table = (unsigned long)config_tables[i].table;
719 		} else if (efi_enabled(EFI_64BIT)) {
720 			guid = &tbl64[i].guid;
721 			table = tbl64[i].table;
722 
723 			if (IS_ENABLED(CONFIG_X86_32) &&
724 			    tbl64[i].table > U32_MAX) {
725 				pr_cont("\n");
726 				pr_err("Table located above 4GB, disabling EFI.\n");
727 				return -EINVAL;
728 			}
729 		} else {
730 			guid = &tbl32[i].guid;
731 			table = tbl32[i].table;
732 		}
733 
734 		if (!match_config_table(guid, table, common_tables) && arch_tables)
735 			match_config_table(guid, table, arch_tables);
736 	}
737 	pr_cont("\n");
738 	set_bit(EFI_CONFIG_TABLES, &efi.flags);
739 
740 	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
741 		struct linux_efi_random_seed *seed;
742 		u32 size = 0;
743 
744 		seed = early_memremap(efi_rng_seed, sizeof(*seed));
745 		if (seed != NULL) {
746 			size = min_t(u32, seed->size, SZ_1K); // sanity check
747 			early_memunmap(seed, sizeof(*seed));
748 		} else {
749 			pr_err("Could not map UEFI random seed!\n");
750 		}
751 		if (size > 0) {
752 			seed = early_memremap(efi_rng_seed,
753 					      sizeof(*seed) + size);
754 			if (seed != NULL) {
755 				add_bootloader_randomness(seed->bits, size);
756 				memzero_explicit(seed->bits, size);
757 				early_memunmap(seed, sizeof(*seed) + size);
758 			} else {
759 				pr_err("Could not map UEFI random seed!\n");
760 			}
761 		}
762 	}
763 
764 	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
765 		efi_memattr_init();
766 
767 	efi_tpm_eventlog_init();
768 
769 	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
770 		unsigned long prsv = mem_reserve;
771 
772 		while (prsv) {
773 			struct linux_efi_memreserve *rsv;
774 			u8 *p;
775 
776 			/*
777 			 * Just map a full page: that is what we will get
778 			 * anyway, and it permits us to map the entire entry
779 			 * before knowing its size.
780 			 */
781 			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
782 					   PAGE_SIZE);
783 			if (p == NULL) {
784 				pr_err("Could not map UEFI memreserve entry!\n");
785 				return -ENOMEM;
786 			}
787 
788 			rsv = (void *)(p + prsv % PAGE_SIZE);
789 
790 			/* reserve the entry itself */
791 			memblock_reserve(prsv,
792 					 struct_size(rsv, entry, rsv->size));
793 
794 			for (i = 0; i < atomic_read(&rsv->count); i++) {
795 				memblock_reserve(rsv->entry[i].base,
796 						 rsv->entry[i].size);
797 			}
798 
799 			prsv = rsv->next;
800 			early_memunmap(p, PAGE_SIZE);
801 		}
802 	}
803 
804 	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
805 		efi_rt_properties_table_t *tbl;
806 
807 		tbl = early_memremap(rt_prop, sizeof(*tbl));
808 		if (tbl) {
809 			efi.runtime_supported_mask &= tbl->runtime_services_supported;
810 			early_memunmap(tbl, sizeof(*tbl));
811 		}
812 	}
813 
814 	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
815 	    initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
816 		struct linux_efi_initrd *tbl;
817 
818 		tbl = early_memremap(initrd, sizeof(*tbl));
819 		if (tbl) {
820 			phys_initrd_start = tbl->base;
821 			phys_initrd_size = tbl->size;
822 			tbl->base = tbl->size = 0;
823 			early_memunmap(tbl, sizeof(*tbl));
824 		}
825 	}
826 
827 	if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) &&
828 	    efi.unaccepted != EFI_INVALID_TABLE_ADDR) {
829 		struct efi_unaccepted_memory *unaccepted;
830 
831 		unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
832 		if (unaccepted) {
833 
834 			if (unaccepted->version == 1) {
835 				reserve_unaccepted(unaccepted);
836 			} else {
837 				efi.unaccepted = EFI_INVALID_TABLE_ADDR;
838 			}
839 
840 			early_memunmap(unaccepted, sizeof(*unaccepted));
841 		}
842 	}
843 
844 	return 0;
845 }
846 
847 int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
848 {
849 	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
850 		pr_err("System table signature incorrect!\n");
851 		return -EINVAL;
852 	}
853 
854 	return 0;
855 }
856 
857 static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
858 						size_t size)
859 {
860 	const efi_char16_t *ret;
861 
862 	ret = early_memremap_ro(fw_vendor, size);
863 	if (!ret)
864 		pr_err("Could not map the firmware vendor!\n");
865 	return ret;
866 }
867 
868 static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
869 {
870 	early_memunmap((void *)fw_vendor, size);
871 }
872 
873 void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
874 				     unsigned long fw_vendor)
875 {
876 	char vendor[100] = "unknown";
877 	const efi_char16_t *c16;
878 	size_t i;
879 	u16 rev;
880 
881 	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
882 	if (c16) {
883 		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
884 			vendor[i] = c16[i];
885 		vendor[i] = '\0';
886 
887 		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
888 	}
889 
890 	rev = (u16)systab_hdr->revision;
891 	pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);
892 
893 	rev %= 10;
894 	if (rev)
895 		pr_cont(".%u", rev);
896 
897 	pr_cont(" by %s\n", vendor);
898 
899 	if (IS_ENABLED(CONFIG_X86_64) &&
900 	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
901 	    !strcmp(vendor, "Apple")) {
902 		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
903 		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
904 	}
905 }
906 
907 static __initdata char memory_type_name[][13] = {
908 	"Reserved",
909 	"Loader Code",
910 	"Loader Data",
911 	"Boot Code",
912 	"Boot Data",
913 	"Runtime Code",
914 	"Runtime Data",
915 	"Conventional",
916 	"Unusable",
917 	"ACPI Reclaim",
918 	"ACPI Mem NVS",
919 	"MMIO",
920 	"MMIO Port",
921 	"PAL Code",
922 	"Persistent",
923 	"Unaccepted",
924 };
925 
926 char * __init efi_md_typeattr_format(char *buf, size_t size,
927 				     const efi_memory_desc_t *md)
928 {
929 	char *pos;
930 	int type_len;
931 	u64 attr;
932 
933 	pos = buf;
934 	if (md->type >= ARRAY_SIZE(memory_type_name))
935 		type_len = snprintf(pos, size, "[type=%u", md->type);
936 	else
937 		type_len = snprintf(pos, size, "[%-*s",
938 				    (int)(sizeof(memory_type_name[0]) - 1),
939 				    memory_type_name[md->type]);
940 	if (type_len >= size)
941 		return buf;
942 
943 	pos += type_len;
944 	size -= type_len;
945 
946 	attr = md->attribute;
947 	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
948 		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
949 		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
950 		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
951 		     EFI_MEMORY_MORE_RELIABLE | EFI_MEMORY_HOT_PLUGGABLE |
952 		     EFI_MEMORY_RUNTIME))
953 		snprintf(pos, size, "|attr=0x%016llx]",
954 			 (unsigned long long)attr);
955 	else
956 		snprintf(pos, size,
957 			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
958 			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
959 			 attr & EFI_MEMORY_HOT_PLUGGABLE	? "HP"  : "",
960 			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
961 			 attr & EFI_MEMORY_CPU_CRYPTO   	? "CC"  : "",
962 			 attr & EFI_MEMORY_SP			? "SP"  : "",
963 			 attr & EFI_MEMORY_NV			? "NV"  : "",
964 			 attr & EFI_MEMORY_XP			? "XP"  : "",
965 			 attr & EFI_MEMORY_RP			? "RP"  : "",
966 			 attr & EFI_MEMORY_WP			? "WP"  : "",
967 			 attr & EFI_MEMORY_RO			? "RO"  : "",
968 			 attr & EFI_MEMORY_UCE			? "UCE" : "",
969 			 attr & EFI_MEMORY_WB			? "WB"  : "",
970 			 attr & EFI_MEMORY_WT			? "WT"  : "",
971 			 attr & EFI_MEMORY_WC			? "WC"  : "",
972 			 attr & EFI_MEMORY_UC			? "UC"  : "");
973 	return buf;
974 }
975 
976 /*
977  * efi_mem_attributes - lookup memmap attributes for physical address
978  * @phys_addr: the physical address to lookup
979  *
980  * Search in the EFI memory map for the region covering
981  * @phys_addr. Returns the EFI memory attributes if the region
982  * was found in the memory map, 0 otherwise.
983  */
984 u64 efi_mem_attributes(unsigned long phys_addr)
985 {
986 	efi_memory_desc_t *md;
987 
988 	if (!efi_enabled(EFI_MEMMAP))
989 		return 0;
990 
991 	for_each_efi_memory_desc(md) {
992 		if ((md->phys_addr <= phys_addr) &&
993 		    (phys_addr < (md->phys_addr +
994 		    (md->num_pages << EFI_PAGE_SHIFT))))
995 			return md->attribute;
996 	}
997 	return 0;
998 }
999 
1000 /*
1001  * efi_mem_type - lookup memmap type for physical address
1002  * @phys_addr: the physical address to lookup
1003  *
1004  * Search in the EFI memory map for the region covering @phys_addr.
1005  * Returns the EFI memory type if the region was found in the memory
1006  * map, -EINVAL otherwise.
1007  */
1008 int efi_mem_type(unsigned long phys_addr)
1009 {
1010 	const efi_memory_desc_t *md;
1011 
1012 	if (!efi_enabled(EFI_MEMMAP))
1013 		return -ENOTSUPP;
1014 
1015 	for_each_efi_memory_desc(md) {
1016 		if ((md->phys_addr <= phys_addr) &&
1017 		    (phys_addr < (md->phys_addr +
1018 				  (md->num_pages << EFI_PAGE_SHIFT))))
1019 			return md->type;
1020 	}
1021 	return -EINVAL;
1022 }
1023 
1024 int efi_status_to_err(efi_status_t status)
1025 {
1026 	int err;
1027 
1028 	switch (status) {
1029 	case EFI_SUCCESS:
1030 		err = 0;
1031 		break;
1032 	case EFI_INVALID_PARAMETER:
1033 		err = -EINVAL;
1034 		break;
1035 	case EFI_OUT_OF_RESOURCES:
1036 		err = -ENOSPC;
1037 		break;
1038 	case EFI_DEVICE_ERROR:
1039 		err = -EIO;
1040 		break;
1041 	case EFI_WRITE_PROTECTED:
1042 		err = -EROFS;
1043 		break;
1044 	case EFI_SECURITY_VIOLATION:
1045 		err = -EACCES;
1046 		break;
1047 	case EFI_NOT_FOUND:
1048 		err = -ENOENT;
1049 		break;
1050 	case EFI_ABORTED:
1051 		err = -EINTR;
1052 		break;
1053 	default:
1054 		err = -EINVAL;
1055 	}
1056 
1057 	return err;
1058 }
1059 EXPORT_SYMBOL_GPL(efi_status_to_err);
1060 
1061 static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
1062 static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
1063 
1064 static int __init efi_memreserve_map_root(void)
1065 {
1066 	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
1067 		return -ENODEV;
1068 
1069 	efi_memreserve_root = memremap(mem_reserve,
1070 				       sizeof(*efi_memreserve_root),
1071 				       MEMREMAP_WB);
1072 	if (WARN_ON_ONCE(!efi_memreserve_root))
1073 		return -ENOMEM;
1074 	return 0;
1075 }
1076 
1077 static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
1078 {
1079 	struct resource *res, *parent;
1080 	int ret;
1081 
1082 	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
1083 	if (!res)
1084 		return -ENOMEM;
1085 
1086 	res->name	= "reserved";
1087 	res->flags	= IORESOURCE_MEM;
1088 	res->start	= addr;
1089 	res->end	= addr + size - 1;
1090 
1091 	/* we expect a conflict with a 'System RAM' region */
1092 	parent = request_resource_conflict(&iomem_resource, res);
1093 	ret = parent ? request_resource(parent, res) : 0;
1094 
1095 	/*
1096 	 * Given that efi_mem_reserve_iomem() can be called at any
1097 	 * time, only call memblock_reserve() if the architecture
1098 	 * keeps the infrastructure around.
1099 	 */
1100 	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
1101 		memblock_reserve(addr, size);
1102 
1103 	return ret;
1104 }
1105 
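/*
 * Record a reservation in the LINUX_EFI_MEMRESERVE linked list so that it
 * survives kexec: claim a free slot in an existing list entry if possible,
 * otherwise allocate a fresh page-sized entry and prepend it to the list.
 */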
1106 int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
1107 {
1108 	struct linux_efi_memreserve *rsv;
1109 	unsigned long prsv;
1110 	int rc, index;
1111 
1112 	if (efi_memreserve_root == (void *)ULONG_MAX)
1113 		return -ENODEV;
1114 
1115 	if (!efi_memreserve_root) {
1116 		rc = efi_memreserve_map_root();
1117 		if (rc)
1118 			return rc;
1119 	}
1120 
1121 	/* first try to find a slot in an existing linked list entry */
1122 	for (prsv = efi_memreserve_root->next; prsv; ) {
1123 		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
1124 		if (!rsv)
1125 			return -ENOMEM;
1126 		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
1127 		if (index < rsv->size) {
1128 			rsv->entry[index].base = addr;
1129 			rsv->entry[index].size = size;
1130 
1131 			memunmap(rsv);
1132 			return efi_mem_reserve_iomem(addr, size);
1133 		}
1134 		prsv = rsv->next;
1135 		memunmap(rsv);
1136 	}
1137 
1138 	/* no slot found - allocate a new linked list entry */
1139 	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
1140 	if (!rsv)
1141 		return -ENOMEM;
1142 
1143 	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
1144 	if (rc) {
1145 		free_page((unsigned long)rsv);
1146 		return rc;
1147 	}
1148 
1149 	/*
1150 	 * The memremap() call above assumes that a linux_efi_memreserve entry
1151 	 * never crosses a page boundary, so let's ensure that this remains true
1152 	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
1153 	 * using SZ_4K explicitly in the size calculation below.
1154 	 */
1155 	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
1156 	atomic_set(&rsv->count, 1);
1157 	rsv->entry[0].base = addr;
1158 	rsv->entry[0].size = size;
1159 
1160 	spin_lock(&efi_mem_reserve_persistent_lock);
1161 	rsv->next = efi_memreserve_root->next;
1162 	efi_memreserve_root->next = __pa(rsv);
1163 	spin_unlock(&efi_mem_reserve_persistent_lock);
1164 
1165 	return efi_mem_reserve_iomem(addr, size);
1166 }
1167 
1168 static int __init efi_memreserve_root_init(void)
1169 {
1170 	if (efi_memreserve_root)
1171 		return 0;
1172 	if (efi_memreserve_map_root())
1173 		efi_memreserve_root = (void *)ULONG_MAX;
1174 	return 0;
1175 }
1176 early_initcall(efi_memreserve_root_init);
1177 
1178 #ifdef CONFIG_KEXEC
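/*
 * Reboot notifier: when a kexec is in progress, refill the EFI RNG seed
 * table with fresh entropy so that the next kernel does not see the seed
 * this kernel has already consumed.
 */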
1179 static int update_efi_random_seed(struct notifier_block *nb,
1180 				  unsigned long code, void *unused)
1181 {
1182 	struct linux_efi_random_seed *seed;
1183 	u32 size = 0;
1184 
1185 	if (!kexec_in_progress)
1186 		return NOTIFY_DONE;
1187 
1188 	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
1189 	if (seed != NULL) {
1190 		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
1191 		memunmap(seed);
1192 	} else {
1193 		pr_err("Could not map UEFI random seed!\n");
1194 	}
1195 	if (size > 0) {
1196 		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
1197 				MEMREMAP_WB);
1198 		if (seed != NULL) {
1199 			seed->size = size;
1200 			get_random_bytes(seed->bits, seed->size);
1201 			memunmap(seed);
1202 		} else {
1203 			pr_err("Could not map UEFI random seed!\n");
1204 		}
1205 	}
1206 	return NOTIFY_DONE;
1207 }
1208 
1209 static struct notifier_block efi_random_seed_nb = {
1210 	.notifier_call = update_efi_random_seed,
1211 };
1212 
1213 static int __init register_update_efi_random_seed(void)
1214 {
1215 	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
1216 		return 0;
1217 	return register_reboot_notifier(&efi_random_seed_nb);
1218 }
1219 late_initcall(register_update_efi_random_seed);
1220 #endif
1221