xref: /linux/arch/arm64/kernel/efi.c (revision 93d90ad708b8da6efc0e487b66111aa9db7f70c7)
/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 2.4
 *
 * Copyright (C) 2013, 2014 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <asm/efi.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

struct efi_memory_map memmap;

static efi_runtime_services_t *runtime;

static u64 efi_system_table;

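/*
 * Passing "uefi_debug" on the kernel command line enables the verbose
 * output below: the UEFI memory map as it is processed, the boot
 * services regions that are freed, and the runtime regions that are
 * remapped.
 */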
static int uefi_debug __initdata;
static int __init uefi_debug_setup(char *str)
{
	uefi_debug = 1;

	return 0;
}
early_param("uefi_debug", uefi_debug_setup);

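/*
 * A region counts as normal RAM if the firmware marks it as
 * write-back cacheable.
 */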
static int __init is_normal_ram(efi_memory_desc_t *md)
{
	if (md->attribute & EFI_MEMORY_WB)
		return 1;
	return 0;
}

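/*
 * Add identity mappings for all of memory and for the UEFI runtime I/O
 * regions to idmap_pg_dir, so that the firmware remains reachable while
 * the identity map is active (see the SetVirtualAddressMap() call in
 * arm64_enter_virtual_mode()).
 */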
static void __init efi_setup_idmap(void)
{
	struct memblock_region *r;
	efi_memory_desc_t *md;
	u64 paddr, npages, size;

	for_each_memblock(memory, r)
		create_id_mapping(r->base, r->size, 0);

	/* map runtime I/O spaces */
	for_each_efi_memory_desc(&memmap, md) {
		if (!(md->attribute & EFI_MEMORY_RUNTIME) || is_normal_ram(md))
			continue;
		paddr = md->phys_addr;
		npages = md->num_pages;
		memrange_efi_to_native(&paddr, &npages);
		size = npages << PAGE_SHIFT;
		create_id_mapping(paddr, size, 1);
	}
}

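/*
 * Map the UEFI system table, verify its signature and revision, report
 * the firmware vendor and parse the configuration tables.
 */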
static int __init uefi_init(void)
{
	efi_char16_t *c16;
	char vendor[100] = "unknown";
	int i, retval;

	efi.systab = early_memremap(efi_system_table,
				    sizeof(efi_system_table_t));
	if (efi.systab == NULL) {
		pr_warn("Unable to map EFI system table.\n");
		return -ENOMEM;
	}

	set_bit(EFI_BOOT, &efi.flags);
	set_bit(EFI_64BIT, &efi.flags);

	/*
	 * Verify the EFI Table
	 */
	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect\n");
		retval = -EINVAL;
		goto out;
	}
	if ((efi.systab->hdr.revision >> 16) < 2)
		pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
			efi.systab->hdr.revision >> 16,
			efi.systab->hdr.revision & 0xffff);

	/* Show what we know for posterity */
	c16 = early_memremap(efi.systab->fw_vendor,
			     sizeof(vendor));
	if (c16) {
		for (i = 0; i < (int) sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';
		early_memunmap(c16, sizeof(vendor));
	}

	pr_info("EFI v%u.%.02u by %s\n",
		efi.systab->hdr.revision >> 16,
		efi.systab->hdr.revision & 0xffff, vendor);

	retval = efi_config_init(NULL);

out:
	early_memunmap(efi.systab, sizeof(efi_system_table_t));
	return retval;
}

/*
 * Return true for RAM regions we want to permanently reserve.
 */
static __init int is_reserve_region(efi_memory_desc_t *md)
{
	switch (md->type) {
	case EFI_LOADER_CODE:
	case EFI_LOADER_DATA:
	case EFI_BOOT_SERVICES_CODE:
	case EFI_BOOT_SERVICES_DATA:
	case EFI_CONVENTIONAL_MEMORY:
		return 0;
	default:
		break;
	}
	return is_normal_ram(md);
}

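/*
 * Walk the UEFI memory map: register every normal RAM region with the
 * early memory subsystem and reserve the regions that must not be
 * handed to the page allocator. Boot services code/data is reserved
 * here too; it is released again by free_boot_services() once
 * SetVirtualAddressMap() has been called.
 */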
static __init void reserve_regions(void)
{
	efi_memory_desc_t *md;
	u64 paddr, npages, size;

	if (uefi_debug)
		pr_info("Processing EFI memory map:\n");

	for_each_efi_memory_desc(&memmap, md) {
		paddr = md->phys_addr;
		npages = md->num_pages;

		if (uefi_debug) {
			char buf[64];

			pr_info("  0x%012llx-0x%012llx %s",
				paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
				efi_md_typeattr_format(buf, sizeof(buf), md));
		}

		memrange_efi_to_native(&paddr, &npages);
		size = npages << PAGE_SHIFT;

		if (is_normal_ram(md))
			early_init_dt_add_memory_arch(paddr, size);

		if (is_reserve_region(md) ||
		    md->type == EFI_BOOT_SERVICES_CODE ||
		    md->type == EFI_BOOT_SERVICES_DATA) {
			memblock_reserve(paddr, size);
			if (uefi_debug)
				pr_cont("*");
		}

		if (uefi_debug)
			pr_cont("\n");
	}

	set_bit(EFI_MEMMAP, &efi.flags);
}

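/* Hand a single physical range back to the page allocator. */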
static u64 __init free_one_region(u64 start, u64 end)
{
	u64 size = end - start;

	if (uefi_debug)
		pr_info("  EFI freeing: 0x%012llx-0x%012llx\n", start, end - 1);

	free_bootmem_late(start, size);
	return size;
}

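/*
 * Free [start, end), but skip the page-aligned range occupied by the
 * UEFI memory map itself so that it stays intact.
 */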
static u64 __init free_region(u64 start, u64 end)
{
	u64 map_start, map_end, total = 0;

	if (end <= start)
		return total;

	map_start = (u64)memmap.phys_map;
	map_end = PAGE_ALIGN(map_start + (memmap.map_end - memmap.map));
	map_start &= PAGE_MASK;

	if (start < map_end && end > map_start) {
		/* region overlaps UEFI memmap */
		if (start < map_start)
			total += free_one_region(start, map_start);

		if (map_end < end)
			total += free_one_region(map_end, end);
	} else
		total += free_one_region(start, end);

	return total;
}

static void __init free_boot_services(void)
{
	u64 total_freed = 0;
	u64 keep_end, free_start, free_end;
	efi_memory_desc_t *md;

	/*
	 * If the kernel uses larger pages than UEFI, we have to be careful
	 * not to inadvertently free memory we want to keep if there is
	 * overlap at the kernel page size alignment. We do not want to
	 * free is_reserve_region() memory nor the UEFI memmap itself.
	 *
	 * The memory map is sorted, so we keep track of the end of
	 * any previous region we want to keep, remember any region
	 * we want to free and defer freeing it until we encounter
	 * the next region we want to keep. This way, before freeing
	 * it, we can clip it as needed to avoid freeing memory we
	 * want to keep for UEFI.
	 */

	keep_end = 0;
	free_start = 0;

	for_each_efi_memory_desc(&memmap, md) {
		u64 paddr, npages, size;

		if (is_reserve_region(md)) {
			/*
			 * We don't want to free any memory from this region.
			 */
			if (free_start) {
				/* adjust free_end then free region */
				if (free_end > md->phys_addr)
					free_end -= PAGE_SIZE;
				total_freed += free_region(free_start, free_end);
				free_start = 0;
			}
			keep_end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
			continue;
		}

		if (md->type != EFI_BOOT_SERVICES_CODE &&
		    md->type != EFI_BOOT_SERVICES_DATA) {
			/* no need to free this region */
			continue;
		}

		/*
		 * We want to free memory from this region.
		 */
		paddr = md->phys_addr;
		npages = md->num_pages;
		memrange_efi_to_native(&paddr, &npages);
		size = npages << PAGE_SHIFT;

		if (free_start) {
			if (paddr <= free_end)
				free_end = paddr + size;
			else {
				total_freed += free_region(free_start, free_end);
				free_start = paddr;
				free_end = paddr + size;
			}
		} else {
			free_start = paddr;
			free_end = paddr + size;
		}
		if (free_start < keep_end) {
			free_start += PAGE_SIZE;
			if (free_start >= free_end)
				free_start = 0;
		}
	}
	if (free_start)
		total_freed += free_region(free_start, free_end);

	if (total_freed)
		pr_info("Freed 0x%llx bytes of EFI boot services memory",
			total_freed);
}

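/*
 * Main EFI initialisation, called early in boot: pick up the system
 * table address and memory map that the EFI stub recorded in the
 * device tree, reserve the memory map and process it.
 */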
void __init efi_init(void)
{
	struct efi_fdt_params params;

	/* Grab UEFI information placed in FDT by stub */
	if (!efi_get_fdt_params(&params, uefi_debug))
		return;

	efi_system_table = params.system_table;

	memblock_reserve(params.mmap & PAGE_MASK,
			 PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK)));
	memmap.phys_map = (void *)params.mmap;
	memmap.map = early_memremap(params.mmap, params.mmap_size);
	memmap.map_end = memmap.map + params.mmap_size;
	memmap.desc_size = params.desc_size;
	memmap.desc_version = params.desc_ver;

	if (uefi_init() < 0)
		return;

	reserve_regions();
}

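/*
 * Complete the identity mapping needed to call SetVirtualAddressMap()
 * and tear down the early mapping of the UEFI memory map; a permanent
 * mapping is created again in arm64_enter_virtual_mode().
 */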
void __init efi_idmap_init(void)
{
	if (!efi_enabled(EFI_BOOT))
		return;

	/* boot time idmap_pg_dir is incomplete, so fill in missing parts */
	efi_setup_idmap();
	early_memunmap(memmap.map, memmap.map_end - memmap.map);
}

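/*
 * Give one runtime region a kernel virtual mapping (cacheable for
 * normal RAM, a device mapping otherwise), record the new address in
 * the descriptor and append a copy of it to the map that will be
 * passed to SetVirtualAddressMap(). Returns 1 on success, 0 on failure.
 */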
static int __init remap_region(efi_memory_desc_t *md, void **new)
{
	u64 paddr, vaddr, npages, size;

	paddr = md->phys_addr;
	npages = md->num_pages;
	memrange_efi_to_native(&paddr, &npages);
	size = npages << PAGE_SHIFT;

	if (is_normal_ram(md))
		vaddr = (__force u64)ioremap_cache(paddr, size);
	else
		vaddr = (__force u64)ioremap(paddr, size);

	if (!vaddr) {
		pr_err("Unable to remap 0x%llx pages @ %p\n",
		       npages, (void *)paddr);
		return 0;
	}

	/* adjust for any rounding when EFI and system page sizes differ */
	md->virt_addr = vaddr + (md->phys_addr - paddr);

	if (uefi_debug)
		pr_info("  EFI remap 0x%012llx => %p\n",
			md->phys_addr, (void *)md->virt_addr);

	memcpy(*new, md, memmap.desc_size);
	*new += memmap.desc_size;

	return 1;
}

/*
 * Switch UEFI from an identity map to a kernel virtual map
 */
static int __init arm64_enter_virtual_mode(void)
{
	efi_memory_desc_t *md;
	phys_addr_t virtmap_phys;
	void *virtmap, *virt_md;
	efi_status_t status;
	u64 mapsize;
	int count = 0;
	unsigned long flags;

	if (!efi_enabled(EFI_BOOT)) {
		pr_info("EFI services will not be available.\n");
		return -1;
	}

	mapsize = memmap.map_end - memmap.map;

	if (efi_runtime_disabled()) {
		pr_info("EFI runtime services will be disabled.\n");
		return -1;
	}

	pr_info("Remapping and enabling EFI services.\n");
	/* replace early memmap mapping with permanent mapping */
	memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
						   mapsize);
	memmap.map_end = memmap.map + mapsize;

	efi.memmap = &memmap;

	/* Map the runtime regions */
	virtmap = kmalloc(mapsize, GFP_KERNEL);
	if (!virtmap) {
		pr_err("Failed to allocate EFI virtual memmap\n");
		return -1;
	}
	virtmap_phys = virt_to_phys(virtmap);
	virt_md = virtmap;

	for_each_efi_memory_desc(&memmap, md) {
		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;
		if (!remap_region(md, &virt_md))
			goto err_unmap;
		++count;
	}

	efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table);
	if (!efi.systab) {
		/*
		 * If we have no virtual mapping for the System Table at this
		 * point, the memory map doesn't cover the physical offset where
		 * it resides. This means the System Table will be inaccessible
		 * to Runtime Services themselves once the virtual mapping is
		 * installed.
		 */
		pr_err("Failed to remap EFI System Table -- buggy firmware?\n");
		goto err_unmap;
	}
	set_bit(EFI_SYSTEM_TABLES, &efi.flags);

	local_irq_save(flags);
	cpu_switch_mm(idmap_pg_dir, &init_mm);

	/* Call SetVirtualAddressMap with the physical address of the map */
	runtime = efi.systab->runtime;
	efi.set_virtual_address_map = runtime->set_virtual_address_map;

	status = efi.set_virtual_address_map(count * memmap.desc_size,
					     memmap.desc_size,
					     memmap.desc_version,
					     (efi_memory_desc_t *)virtmap_phys);
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
	local_irq_restore(flags);

	kfree(virtmap);

	free_boot_services();

	if (status != EFI_SUCCESS) {
		pr_err("Failed to set EFI virtual address map! [%lx]\n",
			status);
		return -1;
	}

	/* Set up runtime services function pointers */
	runtime = efi.systab->runtime;
	efi_native_runtime_setup();
	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);

	efi.runtime_version = efi.systab->hdr.revision;

	return 0;

err_unmap:
	/* unmap all mappings that succeeded: there are 'count' of those */
	for (virt_md = virtmap; count--; virt_md += memmap.desc_size) {
		md = virt_md;
		iounmap((__force void __iomem *)md->virt_addr);
	}
	kfree(virtmap);
	return -1;
}
early_initcall(arm64_enter_virtual_mode);

static int __init arm64_dmi_init(void)
{
	/*
	 * On arm64, DMI depends on UEFI, and dmi_scan_machine() needs to
	 * be called early because dmi_id_init(), which is an arch_initcall
	 * itself, depends on dmi_scan_machine() having been called already.
	 */
	dmi_scan_machine();
	if (dmi_available)
		dmi_set_dump_stack_arch_desc();
	return 0;
}
core_initcall(arm64_dmi_init);