xref: /linux/drivers/firmware/efi/libstub/arm64-stub.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013, 2014 Linaro Ltd;  <roy.franz@linaro.org>
 *
 * This file implements the EFI boot stub for the arm64 kernel.
 * Adapted from ARM version by Mark Salter <msalter@redhat.com>
 */


#include <linux/efi.h>
#include <asm/efi.h>
#include <asm/memory.h>
#include <asm/sections.h>

#include "efistub.h"

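/*
 * handle_kernel_image() - sanity check and size up the loaded kernel image
 *
 * Check where and how the firmware loaded the image, derive the size of its
 * code, data and memory footprint from the linker-provided section markers,
 * and hand over to the KASLR helper to (re)locate the kernel in physical
 * memory.
 */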
efi_status_t handle_kernel_image(unsigned long *image_addr,
				 unsigned long *image_size,
				 unsigned long *reserve_addr,
				 unsigned long *reserve_size,
				 efi_loaded_image_t *image,
				 efi_handle_t image_handle)
{
	unsigned long kernel_size, kernel_codesize, kernel_memsize;

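	/*
	 * The stub is linked into the kernel image, which the firmware
	 * loaded at _text, so that is the value image_base should report.
	 * Warn about buggy firmware and fix up the field if it doesn't.
	 */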
	if (image->image_base != _text) {
		efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
		image->image_base = _text;
	}

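	/*
	 * The image is expected to be loaded at an address aligned to
	 * SEGMENT_ALIGN; warn (but carry on) if the firmware placed it at
	 * a smaller alignment.
	 */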
	if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN))
		efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n",
			SEGMENT_ALIGN >> 10);

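	/*
	 * kernel_size is the initialized part of the image (up to _edata),
	 * kernel_codesize the code-carrying part (up to __inittext_end),
	 * and kernel_memsize additionally covers the BSS, which occupies
	 * memory but is not part of the loaded image.
	 */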
	kernel_size = _edata - _text;
	kernel_codesize = __inittext_end - _text;
	kernel_memsize = kernel_size + (_end - _edata);
	*reserve_size = kernel_memsize;
	*image_addr = (unsigned long)_text;

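	/*
	 * The shared KASLR helper decides whether the image can run in
	 * place or must be copied elsewhere, randomizing the placement
	 * when efi_kaslr_get_phys_seed() provides a nonzero seed.
	 */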
	return efi_kaslr_relocate_kernel(image_addr, reserve_addr, reserve_size,
					 kernel_size, kernel_codesize, kernel_memsize,
					 efi_kaslr_get_phys_seed(image_handle));
}

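/* Bare-metal entry point of the kernel proper, defined in head.S */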
asmlinkage void primary_entry(void);

unsigned long primary_entry_offset(void)
{
	/*
	 * When built as part of the kernel, the EFI stub cannot branch to the
	 * kernel proper via the image header, as the PE/COFF header is
	 * strictly not part of the in-memory presentation of the image, only
	 * of the file representation. So instead, we need to jump to the
	 * actual entrypoint in the .text region of the image.
	 */
	return (char *)primary_entry - _text;
}

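/*
 * Make code written to [start, end) visible to instruction fetches: clean
 * the data cache and invalidate the instruction cache for that range to
 * the point of unification.
 */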
void efi_icache_sync(unsigned long start, unsigned long end)
{
	caches_clean_inval_pou(start, end);
}