xref: /linux/arch/arm64/kernel/kexec_image.c (revision 17cfcb68af3bc7d5e8ae08779b1853310a2949f3)
// SPDX-License-Identifier: GPL-2.0
/*
 * Kexec image loader
 *
 * Copyright (C) 2018 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#define pr_fmt(fmt)	"kexec_file(Image): " fmt

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/pe.h>
#include <linux/string.h>
#include <linux/verification.h>
#include <asm/byteorder.h>
#include <asm/cpufeature.h>
#include <asm/image.h>
#include <asm/memory.h>

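/*
 * Probe hook for kexec_file_load(): accept only buffers that look like an
 * arm64 Image. The Image format (see arch/arm64/include/asm/image.h and
 * Documentation/arm64/booting.rst) begins with a 64-byte header whose magic
 * field must hold ARM64_IMAGE_MAGIC ("ARM\x64"). Anything shorter than the
 * header, or with the wrong magic, is rejected so the kexec core can try
 * the next loader in kexec_file_loaders[].
 */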
static int image_probe(const char *kernel_buf, unsigned long kernel_len)
{
	const struct arm64_image_header *h =
		(const struct arm64_image_header *)(kernel_buf);

	if (!h || (kernel_len < sizeof(*h)))
		return -EINVAL;

	if (memcmp(&h->magic, ARM64_IMAGE_MAGIC, sizeof(h->magic)))
		return -EINVAL;

	return 0;
}

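/*
 * Load hook for kexec_file_load(): validate the Image header against the
 * running system, place the kernel in memory via the kexec core, then hand
 * off to load_other_segments() for the remaining pieces. On success the
 * kernel segment and image->start describe where the new kernel will run;
 * on failure an ERR_PTR() is returned.
 */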
static void *image_load(struct kimage *image,
				char *kernel, unsigned long kernel_len,
				char *initrd, unsigned long initrd_len,
				char *cmdline, unsigned long cmdline_len)
{
	struct arm64_image_header *h;
	u64 flags, value;
	bool be_image, be_kernel;
	struct kexec_buf kbuf;
	unsigned long text_offset;
	struct kexec_segment *kernel_segment;
	int ret;

	/* We don't support crash kernels yet. */
	if (image->type == KEXEC_TYPE_CRASH)
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * We require a kernel with an unambiguous Image header. Per
	 * Documentation/arm64/booting.rst, this is the case when image_size
	 * is non-zero (practically speaking, since v3.17).
	 */
	h = (struct arm64_image_header *)kernel;
	if (!h->image_size)
		return ERR_PTR(-EINVAL);

	/*
	 * Check CPU features: the header's flags field records the image's
	 * endianness and page size, both of which must be usable on the
	 * system performing the kexec.
	 */
	flags = le64_to_cpu(h->flags);
	be_image = arm64_image_flag_field(flags, ARM64_IMAGE_FLAG_BE);
	be_kernel = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
	if ((be_image != be_kernel) && !system_supports_mixed_endian())
		return ERR_PTR(-EINVAL);

	value = arm64_image_flag_field(flags, ARM64_IMAGE_FLAG_PAGE_SIZE);
	if (((value == ARM64_IMAGE_FLAG_PAGE_SIZE_4K) &&
			!system_supports_4kb_granule()) ||
	    ((value == ARM64_IMAGE_FLAG_PAGE_SIZE_64K) &&
			!system_supports_64kb_granule()) ||
	    ((value == ARM64_IMAGE_FLAG_PAGE_SIZE_16K) &&
			!system_supports_16kb_granule()))
		return ERR_PTR(-EINVAL);

	/* Load the kernel */
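	/*
	 * With .mem left as KEXEC_BUF_MEM_UNKNOWN, kexec_add_buffer() picks
	 * the load address itself: it walks System RAM between buf_min and
	 * buf_max, bottom-up (top_down = false), looking for a hole of memsz
	 * bytes aligned to MIN_KIMG_ALIGN.
	 */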
	kbuf.image = image;
	kbuf.buf_min = 0;
	kbuf.buf_max = ULONG_MAX;
	kbuf.top_down = false;

	kbuf.buffer = kernel;
	kbuf.bufsz = kernel_len;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf.memsz = le64_to_cpu(h->image_size);
	text_offset = le64_to_cpu(h->text_offset);
	kbuf.buf_align = MIN_KIMG_ALIGN;

	/* Adjust kernel segment with TEXT_OFFSET */
	kbuf.memsz += text_offset;

	ret = kexec_add_buffer(&kbuf);
	if (ret)
		return ERR_PTR(ret);

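	/*
	 * The reservation above was padded by text_offset. Bump the recorded
	 * segment start by the same amount so the Image data is copied
	 * text_offset bytes above the MIN_KIMG_ALIGN-aligned base, as the
	 * boot protocol requires, and trim memsz back to image_size. The new
	 * kernel is entered at the start of the Image.
	 */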
	kernel_segment = &image->segment[image->nr_segments - 1];
	kernel_segment->mem += text_offset;
	kernel_segment->memsz -= text_offset;
	image->start = kernel_segment->mem;

	pr_debug("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
				kernel_segment->mem, kbuf.bufsz,
				kernel_segment->memsz);

	/* Load additional data */
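	/*
	 * load_other_segments() (arch/arm64/kernel/machine_kexec_file.c)
	 * appends the remaining segments: the initrd, if one was supplied,
	 * and a device tree blob whose /chosen node carries the command line
	 * and the initrd location for the new kernel.
	 */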
	ret = load_other_segments(image,
				kernel_segment->mem, kernel_segment->memsz,
				initrd, initrd_len, cmdline);

	return ERR_PTR(ret);
}

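/*
 * Signature checking reuses the PE/COFF machinery: an EFI-capable arm64
 * Image carries a PE header, so the Authenticode signature embedded in the
 * binary (typically added with sbsign) can be verified against the kernel's
 * trusted keys, much like UEFI Secure Boot verification of the same file.
 */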
#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
static int image_verify_sig(const char *kernel, unsigned long kernel_len)
{
	return verify_pefile_signature(kernel, kernel_len, NULL,
				       VERIFYING_KEXEC_PE_SIGNATURE);
}
#endif

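/*
 * This ops table is what machine_kexec_file.c lists in kexec_file_loaders[],
 * making the Image format available to the kexec_file_load(2) syscall.
 * Userspace reaches it with something like "kexec -s -l Image
 * --initrd=initrd.img --command-line=...", where -s selects the file-based
 * syscall instead of the legacy kexec_load(2).
 */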
const struct kexec_file_ops kexec_image_ops = {
	.probe = image_probe,
	.load = image_load,
#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
	.verify_sig = image_verify_sig,
#endif
};