// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 IBM Corporation
 *
 * Authors:
 * Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>
 * Mimi Zohar <zohar@linux.vnet.ibm.com>
 */

#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/kexec.h>
#include <linux/of.h>
#include <linux/ima.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/reboot.h>
#include <asm/page.h>
#include "ima.h"
20
#ifdef CONFIG_IMA_KEXEC
/* Upper bound on the formatted critical-data event string below. */
#define IMA_KEXEC_EVENT_LEN 256

/* True once update_buffer_nb has been registered as a reboot notifier. */
static bool ima_kexec_update_registered;
/* seq_file used as a growable buffer when serializing the measurement list. */
static struct seq_file ima_kexec_file;
/* Size of the kexec segment reserved for the measurement list. */
static size_t kexec_segment_size;
/* Kernel mapping of the loaded image's IMA segment; NULL when unmapped. */
static void *ima_kexec_buffer;
28
ima_free_kexec_file_buf(struct seq_file * sf)29 static void ima_free_kexec_file_buf(struct seq_file *sf)
30 {
31 vfree(sf->buf);
32 sf->buf = NULL;
33 sf->size = 0;
34 sf->read_pos = 0;
35 sf->count = 0;
36 }
37
ima_measure_kexec_event(const char * event_name)38 void ima_measure_kexec_event(const char *event_name)
39 {
40 char ima_kexec_event[IMA_KEXEC_EVENT_LEN];
41 size_t buf_size = 0;
42 long len;
43 int n;
44
45 buf_size = ima_get_binary_runtime_size();
46 len = atomic_long_read(&ima_htable.len);
47
48 n = scnprintf(ima_kexec_event, IMA_KEXEC_EVENT_LEN,
49 "kexec_segment_size=%lu;ima_binary_runtime_size=%lu;"
50 "ima_runtime_measurements_count=%ld;",
51 kexec_segment_size, buf_size, len);
52
53 ima_measure_critical_data("ima_kexec", event_name, ima_kexec_event, n, false, NULL, 0);
54 }
55
/*
 * Ensure the seq_file buffer used to serialize the measurement list holds
 * exactly @segment_size bytes.  kexec 'load' may be called multiple times;
 * the existing allocation is reused when the size is unchanged, otherwise
 * it is freed and reallocated.
 *
 * Returns 0 on success, -ENOMEM if the buffer cannot be allocated.
 */
static int ima_alloc_kexec_file_buf(size_t segment_size)
{
	bool reuse = ima_kexec_file.buf && ima_kexec_file.size == segment_size;

	if (!reuse) {
		ima_free_kexec_file_buf(&ima_kexec_file);

		/* segment size can't change between kexec load and execute */
		ima_kexec_file.buf = vmalloc(segment_size);
		if (!ima_kexec_file.buf)
			return -ENOMEM;

		ima_kexec_file.size = segment_size;
	}

	ima_kexec_file.read_pos = 0;
	ima_kexec_file.count = sizeof(struct ima_kexec_hdr);	/* reserved space */
	ima_measure_kexec_event("kexec_load");

	return 0;
}
82
/*
 * Serialize the in-memory measurement list into the seq_file buffer that
 * ima_alloc_kexec_file_buf() set up, prefixed by a struct ima_kexec_hdr
 * written into the space reserved at the head of the buffer.
 *
 * On return *buffer_size/*buffer describe the serialized data; the memory
 * is owned by ima_kexec_file and must not be freed by the caller.
 *
 * NOTE(review): @segment_size is currently unused — the limit actually
 * enforced is ima_kexec_file.size, fixed at allocation time.
 *
 * Returns 0 on success, -EINVAL if no buffer was allocated or the list no
 * longer fits in it (the serialized prefix is still valid in that case).
 */
static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer,
				     unsigned long segment_size)
{
	struct ima_queue_entry *qe;
	struct ima_kexec_hdr khdr;
	int ret = 0;

	/* segment size can't change between kexec load and execute */
	if (!ima_kexec_file.buf) {
		pr_err("Kexec file buf not allocated\n");
		return -EINVAL;
	}

	memset(&khdr, 0, sizeof(khdr));
	khdr.version = 1;
	/* This is an append-only list, no need to hold the RCU read lock */
	list_for_each_entry_rcu(qe, &ima_measurements, later, true) {
		/* Stop (and report failure) once the buffer is exhausted. */
		if (ima_kexec_file.count < ima_kexec_file.size) {
			khdr.count++;
			ima_measurements_show(&ima_kexec_file, qe);
		} else {
			ret = -EINVAL;
			break;
		}
	}

	/*
	 * fill in reserved space with some buffer details
	 * (eg. version, buffer size, number of measurements)
	 */
	khdr.buffer_size = ima_kexec_file.count;
	/* The next kernel may be the opposite endianness; canonical = LE. */
	if (ima_canonical_fmt) {
		khdr.version = cpu_to_le16(khdr.version);
		khdr.count = cpu_to_le64(khdr.count);
		khdr.buffer_size = cpu_to_le64(khdr.buffer_size);
	}
	memcpy(ima_kexec_file.buf, &khdr, sizeof(khdr));

	/* Debug aid: dump at most the first 100 bytes of the buffer. */
	print_hex_dump_debug("ima dump: ", DUMP_PREFIX_NONE, 16, 1,
			     ima_kexec_file.buf, ima_kexec_file.count < 100 ?
			     ima_kexec_file.count : 100,
			     true);

	*buffer_size = ima_kexec_file.count;
	*buffer = ima_kexec_file.buf;

	return ret;
}
131
/*
 * Called during kexec_file_load so that IMA can add a segment to the kexec
 * image for the measurement list for the next kernel.
 *
 * This function assumes that kexec_lock is held.
 */
void ima_add_kexec_buffer(struct kimage *image)
{
	struct kexec_buf kbuf = { .image = image, .buf_align = PAGE_SIZE,
				  .buf_min = 0, .buf_max = ULONG_MAX,
				  .top_down = true };
	unsigned long binary_runtime_size;
	unsigned long extra_memory;

	/* use more understandable variable names than defined in kbuf */
	size_t kexec_buffer_size = 0;
	void *kexec_buffer = NULL;
	int ret;

	/* No measurement-list carry-over for crash (kdump) kernels. */
	if (image->type == KEXEC_TYPE_CRASH)
		return;

	/*
	 * Reserve extra memory for measurements added during kexec.
	 * Fall back to half a page when the Kconfig value is unusable.
	 */
	if (CONFIG_IMA_KEXEC_EXTRA_MEMORY_KB <= 0)
		extra_memory = PAGE_SIZE / 2;
	else
		extra_memory = CONFIG_IMA_KEXEC_EXTRA_MEMORY_KB * 1024;

	binary_runtime_size = ima_get_binary_runtime_size() + extra_memory;

	/* Guard the ALIGN() below against overflowing past ULONG_MAX. */
	if (binary_runtime_size >= ULONG_MAX - PAGE_SIZE)
		kexec_segment_size = ULONG_MAX;
	else
		kexec_segment_size = ALIGN(binary_runtime_size, PAGE_SIZE);

	/* Refuse to dedicate more than half of RAM to the list. */
	if ((kexec_segment_size == ULONG_MAX) ||
	    ((kexec_segment_size >> PAGE_SHIFT) > totalram_pages() / 2)) {
		pr_err("Binary measurement list too large.\n");
		return;
	}

	ret = ima_alloc_kexec_file_buf(kexec_segment_size);
	if (ret < 0) {
		pr_err("Not enough memory for the kexec measurement buffer.\n");
		return;
	}

	/*
	 * No initial payload (buffer NULL, bufsz 0): only memsz is reserved
	 * here; the list is copied in at reboot by ima_update_kexec_buffer().
	 */
	kbuf.buffer = kexec_buffer;
	kbuf.bufsz = kexec_buffer_size;
	kbuf.memsz = kexec_segment_size;
	image->is_ima_segment_index_set = false;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		pr_err("Error passing over kexec measurement buffer.\n");
		vfree(kexec_buffer);
		return;
	}

	/* Remember where the segment landed so it can be mapped later. */
	image->ima_buffer_addr = kbuf.mem;
	image->ima_buffer_size = kexec_segment_size;
	image->ima_buffer = kexec_buffer;
	image->ima_segment_index = image->nr_segments - 1;
	image->is_ima_segment_index_set = true;

	kexec_dprintk("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
		      kbuf.mem);
}
201
202 /*
203 * Called during kexec execute so that IMA can update the measurement list.
204 */
ima_update_kexec_buffer(struct notifier_block * self,unsigned long action,void * data)205 static int ima_update_kexec_buffer(struct notifier_block *self,
206 unsigned long action, void *data)
207 {
208 size_t buf_size = 0;
209 int ret = NOTIFY_OK;
210 void *buf = NULL;
211
212 if (!kexec_in_progress) {
213 pr_info("No kexec in progress.\n");
214 return ret;
215 }
216
217 if (!ima_kexec_buffer) {
218 pr_err("Kexec buffer not set.\n");
219 return ret;
220 }
221
222 ret = ima_dump_measurement_list(&buf_size, &buf, kexec_segment_size);
223
224 if (ret)
225 pr_err("Dump measurements failed. Error:%d\n", ret);
226
227 if (buf_size != 0)
228 memcpy(ima_kexec_buffer, buf, buf_size);
229
230 kimage_unmap_segment(ima_kexec_buffer);
231 ima_kexec_buffer = NULL;
232
233 return ret;
234 }
235
/*
 * Reboot notifier that flushes late measurements into the kexec segment.
 * INT_MIN is the lowest priority, so it runs after all other reboot
 * notifiers have had a chance to add measurements.
 */
static struct notifier_block update_buffer_nb = {
	.notifier_call = ima_update_kexec_buffer,
	.priority = INT_MIN
};
240
241 /*
242 * Create a mapping for the source pages that contain the IMA buffer
243 * so we can update it later.
244 */
ima_kexec_post_load(struct kimage * image)245 void ima_kexec_post_load(struct kimage *image)
246 {
247 if (ima_kexec_buffer) {
248 kimage_unmap_segment(ima_kexec_buffer);
249 ima_kexec_buffer = NULL;
250 }
251
252 if (!image->ima_buffer_addr)
253 return;
254
255 ima_kexec_buffer = kimage_map_segment(image, image->ima_segment_index);
256 if (!ima_kexec_buffer) {
257 pr_err("Could not map measurements buffer.\n");
258 return;
259 }
260
261 if (!ima_kexec_update_registered) {
262 register_reboot_notifier(&update_buffer_nb);
263 ima_kexec_update_registered = true;
264 }
265 }
266
#endif /* IMA_KEXEC */
268
269 /*
270 * Restore the measurement list from the previous kernel.
271 */
ima_load_kexec_buffer(void)272 void __init ima_load_kexec_buffer(void)
273 {
274 void *kexec_buffer = NULL;
275 size_t kexec_buffer_size = 0;
276 int rc;
277
278 rc = ima_get_kexec_buffer(&kexec_buffer, &kexec_buffer_size);
279 switch (rc) {
280 case 0:
281 rc = ima_restore_measurement_list(kexec_buffer_size,
282 kexec_buffer);
283 if (rc != 0)
284 pr_err("Failed to restore the measurement list: %d\n",
285 rc);
286
287 ima_free_kexec_buffer();
288 break;
289 case -ENOTSUPP:
290 pr_debug("Restoring the measurement list not supported\n");
291 break;
292 case -ENOENT:
293 pr_debug("No measurement list to restore\n");
294 break;
295 default:
296 pr_debug("Error restoring the measurement list: %d\n", rc);
297 }
298 }
299
300 /*
301 * ima_validate_range - verify a physical buffer lies in addressable RAM
302 * @phys: physical start address of the buffer from previous kernel
303 * @size: size of the buffer
304 *
305 * On success return 0. On failure returns -EINVAL so callers can skip
306 * restoring.
307 */
ima_validate_range(phys_addr_t phys,size_t size)308 int ima_validate_range(phys_addr_t phys, size_t size)
309 {
310 unsigned long start_pfn, end_pfn;
311 phys_addr_t end_phys;
312
313 if (check_add_overflow(phys, (phys_addr_t)size - 1, &end_phys))
314 return -EINVAL;
315
316 start_pfn = PHYS_PFN(phys);
317 end_pfn = PHYS_PFN(end_phys);
318
319 #ifdef CONFIG_X86
320 if (!pfn_range_is_mapped(start_pfn, end_pfn))
321 #else
322 if (!page_is_ram(start_pfn) || !page_is_ram(end_pfn))
323 #endif
324 {
325 pr_warn("IMA: previous kernel measurement buffer %pa (size 0x%zx) lies outside available memory\n",
326 &phys, size);
327 return -EINVAL;
328 }
329
330 return 0;
331 }
332