// SPDX-License-Identifier: GPL-2.0
/*
 * kexec_file for arm64
 *
 * Copyright (C) 2018 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 *
 * Most code is derived from the arm64 port of kexec-tools
 */

#define pr_fmt(fmt) "kexec_file: " fmt

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

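/*
 * Image loaders tried in order by the generic kexec_file_load() code;
 * arm64 currently supports only the Image format.
 */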
const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_image_ops,
	NULL
};

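/*
 * Free the arch-specific resources allocated at load time: the device
 * tree blob and the crash ELF headers, then fall back to the generic
 * cleanup.
 */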
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	kvfree(image->arch.dtb);
	image->arch.dtb = NULL;

	vfree(image->elf_headers);
	image->elf_headers = NULL;
	image->elf_headers_sz = 0;

	return kexec_image_post_load_cleanup_default(image);
}

#ifdef CONFIG_CRASH_DUMP
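/*
 * Walk memblock to collect the System RAM ranges, exclude the crashkernel
 * reservation(s), and generate ELF64 core headers describing the remaining
 * memory for the capture kernel's /proc/vmcore.
 */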
static int prepare_elf_headers(void **addr, unsigned long *sz)
{
	struct crash_mem *cmem;
	unsigned int nr_ranges;
	int ret;
	u64 i;
	phys_addr_t start, end;

	nr_ranges = 2; /* for exclusion of crashkernel region */
	for_each_mem_range(i, &start, &end)
		nr_ranges++;

	cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
	if (!cmem)
		return -ENOMEM;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;
	for_each_mem_range(i, &start, &end) {
		cmem->ranges[cmem->nr_ranges].start = start;
		cmem->ranges[cmem->nr_ranges].end = end - 1;
		cmem->nr_ranges++;
	}

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		goto out;

	if (crashk_low_res.end) {
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
					      crashk_low_res.end);
		if (ret)
			goto out;
	}

	ret = crash_prepare_elf64_headers(cmem, true, addr, sz);

out:
	kfree(cmem);
	return ret;
}
#endif

/*
 * Tries to add the initrd and DTB to the image. If it is not possible to find
 * valid locations, this function will undo changes to the image and return
 * non-zero.
 */
int load_other_segments(struct kimage *image,
			unsigned long kernel_load_addr,
			unsigned long kernel_size,
			char *initrd, unsigned long initrd_len,
			char *cmdline)
{
	struct kexec_buf kbuf;
	void *dtb = NULL;
	unsigned long initrd_load_addr = 0, dtb_len,
		      orig_segments = image->nr_segments;
	int ret = 0;

	kbuf.image = image;
	/* do not allocate anything below the kernel */
	kbuf.buf_min = kernel_load_addr + kernel_size;

#ifdef CONFIG_CRASH_DUMP
	/* load elf core header */
	void *headers;
	unsigned long headers_sz;
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = prepare_elf_headers(&headers, &headers_sz);
		if (ret) {
			pr_err("Preparing elf core header failed\n");
			goto out_err;
		}

		kbuf.buffer = headers;
		kbuf.bufsz = headers_sz;
		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
		kbuf.memsz = headers_sz;
		kbuf.buf_align = SZ_64K; /* largest supported page size */
		kbuf.buf_max = ULONG_MAX;
		kbuf.top_down = true;

		ret = kexec_add_buffer(&kbuf);
		if (ret) {
			vfree(headers);
			goto out_err;
		}
		image->elf_headers = headers;
		image->elf_load_addr = kbuf.mem;
		image->elf_headers_sz = headers_sz;

		kexec_dprintk("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
			      image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
	}
#endif

	/* load initrd */
	if (initrd) {
		kbuf.buffer = initrd;
		kbuf.bufsz = initrd_len;
		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
		kbuf.memsz = initrd_len;
		kbuf.buf_align = 0;
		/* within 1GB-aligned window of up to 32GB in size */
		kbuf.buf_max = round_down(kernel_load_addr, SZ_1G)
				+ (unsigned long)SZ_1G * 32;
		kbuf.top_down = false;

		ret = kexec_add_buffer(&kbuf);
		if (ret)
			goto out_err;
		initrd_load_addr = kbuf.mem;

		kexec_dprintk("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
			      initrd_load_addr, kbuf.bufsz, kbuf.memsz);
	}

	/* load dtb */
	dtb = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr,
					   initrd_len, cmdline, 0);
	if (!dtb) {
		pr_err("Preparing for new dtb failed\n");
		ret = -EINVAL;
		goto out_err;
	}

	/* trim it */
	fdt_pack(dtb);
	dtb_len = fdt_totalsize(dtb);
	kbuf.buffer = dtb;
	kbuf.bufsz = dtb_len;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf.memsz = dtb_len;
	/* not across 2MB boundary */
	kbuf.buf_align = SZ_2M;
	kbuf.buf_max = ULONG_MAX;
	kbuf.top_down = true;

	ret = kexec_add_buffer(&kbuf);
	if (ret)
		goto out_err;
	image->arch.dtb = dtb;
	image->arch.dtb_mem = kbuf.mem;

	kexec_dprintk("Loaded dtb at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		      kbuf.mem, kbuf.bufsz, kbuf.memsz);

	return 0;

out_err:
	image->nr_segments = orig_segments;
	kvfree(dtb);
	return ret;
}
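
/*
 * Illustrative call sketch (not part of this file, names are illustrative):
 * the Image format loader's ->load() callback is expected to place the
 * kernel segment first and then hand the remaining pieces to this helper,
 * roughly:
 *
 *	ret = load_other_segments(image, kernel_load_addr, kernel_size,
 *				  initrd, initrd_len, cmdline);
 *	if (ret)
 *		return ERR_PTR(ret);
 *
 * See the arm64 Image loader (kexec_image_ops) for the actual call site.
 */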