// SPDX-License-Identifier: GPL-2.0-only
/*
 * ppc64 code to implement the kexec_file_load syscall
 *
 * Copyright (C) 2004  Adam Litke (agl@us.ibm.com)
 * Copyright (C) 2004  IBM Corp.
 * Copyright (C) 2004,2005  Milton D Miller II, IBM Corporation
 * Copyright (C) 2005  R Sharada (sharada@in.ibm.com)
 * Copyright (C) 2006  Mohan Kumar M (mohan@in.ibm.com)
 * Copyright (C) 2020  IBM Corporation
 *
 * Based on kexec-tools' kexec-ppc64.c, kexec-elf-rel-ppc64.c, fs2dt.c.
 * Heavily modified for the kernel by
 * Hari Bathini, IBM Corporation.
 */

#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/drmem.h>
#include <asm/firmware.h>
#include <asm/kexec_ranges.h>
#include <asm/crashdump-ppc64.h>
#include <asm/mmzone.h>
#include <asm/iommu.h>
#include <asm/prom.h>
#include <asm/plpks.h>
#include <asm/cputhreads.h>

struct umem_info {
	__be64 *buf;		/* data buffer for usable-memory property */
	u32 size;		/* size allocated for the data buffer */
	u32 max_entries;	/* maximum no. of entries */
	u32 idx;		/* index of current entry */

	/* usable memory ranges to look up */
	unsigned int nr_ranges;
	const struct range *ranges;
};

const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_elf64_ops,
	NULL
};

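/**
 * arch_check_excluded_range - Check whether the given address range overlaps
 *                             any of the memory ranges excluded for kexec
 *                             buffer placement (image->arch.exclude_ranges).
 * @image:                     Kexec image.
 * @start:                     Start address of the range to check.
 * @end:                       End address of the range to check.
 *
 * Returns 1 if the range overlaps an excluded range, 0 otherwise.
 */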
int arch_check_excluded_range(struct kimage *image, unsigned long start,
			      unsigned long end)
{
	struct crash_mem *emem;
	int i;

	emem = image->arch.exclude_ranges;
	for (i = 0; i < emem->nr_ranges; i++)
		if (start < emem->ranges[i].end && end > emem->ranges[i].start)
			return 1;

	return 0;
}

#ifdef CONFIG_CRASH_DUMP
/**
 * check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries
 * @um_info:                  Usable memory buffer and ranges info.
 * @cnt:                      No. of entries to accommodate.
 *
 * On reallocation failure, the old buffer is left in place for the caller to free.
 *
 * Returns buffer on success, NULL on error.
 */
static __be64 *check_realloc_usable_mem(struct umem_info *um_info, int cnt)
{
	u32 new_size;
	__be64 *tbuf;

	if ((um_info->idx + cnt) <= um_info->max_entries)
		return um_info->buf;

	new_size = um_info->size + MEM_RANGE_CHUNK_SZ;
	tbuf = krealloc(um_info->buf, new_size, GFP_KERNEL);
	if (tbuf) {
		um_info->buf = tbuf;
		um_info->size = new_size;
		um_info->max_entries = (um_info->size / sizeof(u64));
	}

	return tbuf;
}

/**
 * add_usable_mem - Add the usable memory ranges within the given memory range
 *                  to the buffer
 * @um_info:        Usable memory buffer and ranges info.
 * @base:           Base address of memory range to look for.
 * @end:            End address of memory range to look for.
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_usable_mem(struct umem_info *um_info, u64 base, u64 end)
{
	u64 loc_base, loc_end;
	bool add;
	int i;

	for (i = 0; i < um_info->nr_ranges; i++) {
		add = false;
		loc_base = um_info->ranges[i].start;
		loc_end = um_info->ranges[i].end;
		if (loc_base >= base && loc_end <= end)
			add = true;
		else if (base < loc_end && end > loc_base) {
			if (loc_base < base)
				loc_base = base;
			if (loc_end > end)
				loc_end = end;
			add = true;
		}

		if (add) {
			if (!check_realloc_usable_mem(um_info, 2))
				return -ENOMEM;

			um_info->buf[um_info->idx++] = cpu_to_be64(loc_base);
			um_info->buf[um_info->idx++] =
					cpu_to_be64(loc_end - loc_base + 1);
		}
	}

	return 0;
}

/**
 * kdump_setup_usable_lmb - This is a callback function that gets called by
 *                          walk_drmem_lmbs for every LMB to set its
 *                          usable memory ranges.
 * @lmb:                    LMB info.
 * @usm:                    linux,drconf-usable-memory property value.
 * @data:                   Pointer to usable memory buffer and ranges info.
 *
 * Returns 0 on success, negative errno on error.
 */
static int kdump_setup_usable_lmb(struct drmem_lmb *lmb, const __be32 **usm,
				  void *data)
{
	struct umem_info *um_info;
	int tmp_idx, ret;
	u64 base, end;

	/*
	 * kdump load isn't supported on kernels already booted with
	 * linux,drconf-usable-memory property.
	 */
	if (*usm) {
		pr_err("linux,drconf-usable-memory property already exists!");
		return -EINVAL;
	}

	um_info = data;
	tmp_idx = um_info->idx;
	if (!check_realloc_usable_mem(um_info, 1))
		return -ENOMEM;

	um_info->idx++;
	base = lmb->base_addr;
	end = base + drmem_lmb_size() - 1;
	ret = add_usable_mem(um_info, base, end);
	if (!ret) {
		/*
		 * Update the no. of ranges added. Two entries (base & size)
		 * for every range added.
		 */
		um_info->buf[tmp_idx] =
				cpu_to_be64((um_info->idx - tmp_idx - 1) / 2);
	}

	return ret;
}

#define NODE_PATH_LEN		256
/**
 * add_usable_mem_property - Add usable memory property for the given
 *                           memory node.
 * @fdt:                     Flattened device tree for the kdump kernel.
 * @dn:                      Memory node.
 * @um_info:                 Usable memory buffer and ranges info.
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_usable_mem_property(void *fdt, struct device_node *dn,
				   struct umem_info *um_info)
{
	int node;
	char path[NODE_PATH_LEN];
	int i, ret;
	u64 base, size;

	of_node_get(dn);

	if (snprintf(path, NODE_PATH_LEN, "%pOF", dn) > (NODE_PATH_LEN - 1)) {
		pr_err("Buffer (%d) too small for memory node: %pOF\n",
		       NODE_PATH_LEN, dn);
		/* Drop the reference taken above before bailing out */
		ret = -EOVERFLOW;
		goto out;
	}
	kexec_dprintk("Memory node path: %s\n", path);

	/* Now that we know the path, find its offset in kdump kernel's fdt */
	node = fdt_path_offset(fdt, path);
	if (node < 0) {
		pr_err("Malformed device tree: error reading %s\n", path);
		ret = -EINVAL;
		goto out;
	}

	um_info->idx  = 0;
	if (!check_realloc_usable_mem(um_info, 2)) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * "reg" property represents sequence of (addr,size) tuples
	 * each representing a memory range.
	 */
	for (i = 0; ; i++) {
		ret = of_property_read_reg(dn, i, &base, &size);
		if (ret)
			break;

		ret = add_usable_mem(um_info, base, base + size - 1);
		if (ret)
			goto out;
	}

	// No reg or empty reg? Skip this node.
	if (i == 0)
		goto out;

	/*
	 * No kdump kernel usable memory found in this memory node.
	 * Write (0,0) tuple in linux,usable-memory property for
	 * this region to be ignored.
	 */
	if (um_info->idx == 0) {
		um_info->buf[0] = 0;
		um_info->buf[1] = 0;
		um_info->idx = 2;
	}

	ret = fdt_setprop(fdt, node, "linux,usable-memory", um_info->buf,
			  (um_info->idx * sizeof(u64)));

out:
	of_node_put(dn);
	return ret;
}


/**
 * update_usable_mem_fdt - Updates kdump kernel's fdt with linux,usable-memory
 *                         and linux,drconf-usable-memory DT properties as
 *                         appropriate to restrict its memory usage.
 * @fdt:                   Flattened device tree for the kdump kernel.
 * @usable_mem:            Usable memory ranges for kdump kernel.
 *
 * Returns 0 on success, negative errno on error.
 */
static int update_usable_mem_fdt(void *fdt, struct crash_mem *usable_mem)
{
	struct umem_info um_info;
	struct device_node *dn;
	int node, ret = 0;

	if (!usable_mem) {
		pr_err("Usable memory ranges for kdump kernel not found\n");
		return -ENOENT;
	}

	node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
	if (node == -FDT_ERR_NOTFOUND)
		kexec_dprintk("No dynamic reconfiguration memory found\n");
	else if (node < 0) {
		pr_err("Malformed device tree: error reading /ibm,dynamic-reconfiguration-memory.\n");
		return -EINVAL;
	}

	um_info.buf  = NULL;
	um_info.size = 0;
	um_info.max_entries = 0;
	um_info.idx  = 0;
	/* Memory ranges to look up */
	um_info.ranges = &(usable_mem->ranges[0]);
	um_info.nr_ranges = usable_mem->nr_ranges;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (dn) {
		ret = walk_drmem_lmbs(dn, &um_info, kdump_setup_usable_lmb);
		of_node_put(dn);

		if (ret) {
			pr_err("Could not setup linux,drconf-usable-memory property for kdump\n");
			goto out;
		}

		ret = fdt_setprop(fdt, node, "linux,drconf-usable-memory",
				  um_info.buf, (um_info.idx * sizeof(u64)));
		if (ret) {
			pr_err("Failed to update fdt with linux,drconf-usable-memory property: %s",
			       fdt_strerror(ret));
			goto out;
		}
	}

	/*
	 * Walk through each memory node and set linux,usable-memory property
	 * for the corresponding node in kdump kernel's fdt.
	 */
	for_each_node_by_type(dn, "memory") {
		ret = add_usable_mem_property(fdt, dn, &um_info);
		if (ret) {
			pr_err("Failed to set linux,usable-memory property for %s node",
			       dn->full_name);
			of_node_put(dn);
			goto out;
		}
	}

out:
	kfree(um_info.buf);
	return ret;
}

/**
 * load_backup_segment - Locate a memory hole to place the backup region.
 * @image:               Kexec image.
 * @kbuf:                Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
static int load_backup_segment(struct kimage *image, struct kexec_buf *kbuf)
{
	void *buf;
	int ret;

	/*
	 * Setup a source buffer for backup segment.
	 *
	 * A source buffer has no meaning for backup region as data will
	 * be copied from backup source, after crash, in the purgatory.
	 * But as load segment code doesn't recognize such segments,
	 * setup a dummy source buffer to keep it happy for now.
	 */
	buf = vzalloc(BACKUP_SRC_SIZE);
	if (!buf)
		return -ENOMEM;

	kbuf->buffer = buf;
	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf->bufsz = kbuf->memsz = BACKUP_SRC_SIZE;
	kbuf->top_down = false;

	ret = kexec_add_buffer(kbuf);
	if (ret) {
		vfree(buf);
		return ret;
	}

	image->arch.backup_buf = buf;
	image->arch.backup_start = kbuf->mem;
	return 0;
}

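/**
 * kdump_extra_elfcorehdr_size - Estimate the extra elfcorehdr space needed to
 *                               hold program headers for memory ranges that
 *                               may be added by hotplug after kdump load.
 * @cmem:                        Crash memory ranges known at load time.
 *
 * Returns the extra size in bytes, or 0 when crash/memory hotplug support
 * is not enabled.
 */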
static unsigned int kdump_extra_elfcorehdr_size(struct crash_mem *cmem)
{
#if defined(CONFIG_CRASH_HOTPLUG) && defined(CONFIG_MEMORY_HOTPLUG)
	unsigned int extra_sz = 0;

	if (CONFIG_CRASH_MAX_MEMORY_RANGES > (unsigned int)PN_XNUM)
		pr_warn("Number of Phdrs %u exceeds max\n", CONFIG_CRASH_MAX_MEMORY_RANGES);
	else if (cmem->nr_ranges >= CONFIG_CRASH_MAX_MEMORY_RANGES)
		pr_warn("Configured crash mem ranges may not be enough\n");
	else
		extra_sz = (CONFIG_CRASH_MAX_MEMORY_RANGES - cmem->nr_ranges) * sizeof(Elf64_Phdr);

	return extra_sz;
#endif
	return 0;
}

/**
 * load_elfcorehdr_segment - Setup crash memory ranges and initialize elfcorehdr
 *                           segment needed to load kdump kernel.
 * @image:                   Kexec image.
 * @kbuf:                    Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf)
{
	struct crash_mem *cmem = NULL;
	unsigned long headers_sz;
	void *headers = NULL;
	int ret;

	ret = get_crash_memory_ranges(&cmem);
	if (ret)
		goto out;

	/* Setup elfcorehdr segment */
	ret = crash_prepare_elf64_headers(cmem, false, &headers, &headers_sz);
	if (ret) {
		pr_err("Failed to prepare elf headers for the core\n");
		goto out;
	}

	/* Fix the offset for backup region in the ELF header */
	sync_backup_region_phdr(image, headers, false);

	kbuf->buffer = headers;
	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf->bufsz = headers_sz;

	/*
	 * Account for extra space required to accommodate additional memory
	 * ranges in elfcorehdr due to memory hotplug events.
	 */
	kbuf->memsz = headers_sz + kdump_extra_elfcorehdr_size(cmem);
	kbuf->top_down = false;

	ret = kexec_add_buffer(kbuf);
	if (ret) {
		vfree(headers);
		goto out;
	}

	image->elf_load_addr = kbuf->mem;

	/*
	 * If CONFIG_CRASH_HOTPLUG is enabled, the elfcorehdr kexec segment
	 * memsz can be larger than bufsz. Always initialize elf_headers_sz
	 * with memsz. This ensures the correct size is reserved for elfcorehdr
	 * memory in the FDT prepared for kdump.
	 */
	image->elf_headers_sz = kbuf->memsz;
	image->elf_headers = headers;
out:
	kfree(cmem);
	return ret;
}

/**
 * load_crashdump_segments_ppc64 - Initialize the additional segments needed
 *                                 to load kdump kernel.
 * @image:                         Kexec image.
 * @kbuf:                          Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
int load_crashdump_segments_ppc64(struct kimage *image,
				  struct kexec_buf *kbuf)
{
	int ret;

	/* Load backup segment - first 64K bytes of the crashing kernel */
	ret = load_backup_segment(image, kbuf);
	if (ret) {
		pr_err("Failed to load backup segment\n");
		return ret;
	}
	kexec_dprintk("Loaded the backup region at 0x%lx\n", kbuf->mem);

	/* Load elfcorehdr segment - to export crashing kernel's vmcore */
	ret = load_elfcorehdr_segment(image, kbuf);
	if (ret) {
		pr_err("Failed to load elfcorehdr segment\n");
		return ret;
	}
	kexec_dprintk("Loaded elf core header at 0x%lx, bufsz=0x%lx memsz=0x%lx\n",
		      image->elf_load_addr, kbuf->bufsz, kbuf->memsz);

	return 0;
}
#endif

/**
 * setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global
 *                         variables and call setup_purgatory() to initialize
 *                         the common global variables.
 * @image:                 kexec image.
 * @slave_code:            Slave code for the purgatory.
 * @fdt:                   Flattened device tree for the next kernel.
 * @kernel_load_addr:      Address where the kernel is loaded.
 * @fdt_load_addr:         Address where the flattened device tree is loaded.
 *
 * Returns 0 on success, negative errno on error.
 */
int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
			  const void *fdt, unsigned long kernel_load_addr,
			  unsigned long fdt_load_addr)
{
	struct device_node *dn = NULL;
	int ret;

	ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr,
			      fdt_load_addr);
	if (ret)
		goto out;

	if (image->type == KEXEC_TYPE_CRASH) {
		u32 my_run_at_load = 1;

		/*
		 * Tell relocatable kernel to run at load address
		 * via the word meant for that at 0x5c.
		 */
		ret = kexec_purgatory_get_set_symbol(image, "run_at_load",
						     &my_run_at_load,
						     sizeof(my_run_at_load),
						     false);
		if (ret)
			goto out;
	}

	/* Tell purgatory where to look for backup region */
	ret = kexec_purgatory_get_set_symbol(image, "backup_start",
					     &image->arch.backup_start,
					     sizeof(image->arch.backup_start),
					     false);
	if (ret)
		goto out;

	/* Setup OPAL base & entry values */
	dn = of_find_node_by_path("/ibm,opal");
	if (dn) {
		u64 val;

		ret = of_property_read_u64(dn, "opal-base-address", &val);
		if (ret)
			goto out;

		ret = kexec_purgatory_get_set_symbol(image, "opal_base", &val,
						     sizeof(val), false);
		if (ret)
			goto out;

		ret = of_property_read_u64(dn, "opal-entry-address", &val);
		if (ret)
			goto out;
		ret = kexec_purgatory_get_set_symbol(image, "opal_entry", &val,
						     sizeof(val), false);
	}
out:
	if (ret)
		pr_err("Failed to setup purgatory symbols");
	of_node_put(dn);
	return ret;
}

/**
 * cpu_node_size - Compute the size of a CPU node in the FDT.
 *                 This should be done only once and the value is stored in
 *                 a static variable.
 * Returns the max size of a CPU node in the FDT.
 */
static unsigned int cpu_node_size(void)
{
	static unsigned int size;
	struct device_node *dn;
	struct property *pp;

	/*
	 * Don't compute it twice, we are assuming that the per CPU node size
	 * doesn't change during the system's life.
	 */
	if (size)
		return size;

	dn = of_find_node_by_type(NULL, "cpu");
	if (WARN_ON_ONCE(!dn)) {
		// Unlikely to happen
		return 0;
	}

	/*
	 * We compute the sub node size for a CPU node, assuming it
	 * will be the same for all.
	 */
	size += strlen(dn->name) + 5;
	for_each_property_of_node(dn, pp) {
		size += strlen(pp->name);
		size += pp->length;
	}

	of_node_put(dn);
	return size;
}

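/**
 * kdump_extra_fdt_size_ppc64 - Estimate the extra FDT space needed by the
 *                              kdump kernel for usable-memory properties and,
 *                              with CONFIG_CRASH_HOTPLUG, for CPU nodes that
 *                              may be added after the crash kernel is loaded.
 * @image:                      kexec image being loaded.
 * @cpu_nodes:                  Number of CPU nodes in the current device tree.
 *
 * Returns the estimated extra size, or 0 for a non-kdump kernel load.
 */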
static unsigned int kdump_extra_fdt_size_ppc64(struct kimage *image, unsigned int cpu_nodes)
{
	unsigned int extra_size = 0;
	u64 usm_entries;
#ifdef CONFIG_CRASH_HOTPLUG
	unsigned int possible_cpu_nodes;
#endif

	if (!IS_ENABLED(CONFIG_CRASH_DUMP) || image->type != KEXEC_TYPE_CRASH)
		return 0;

	/*
	 * For kdump kernel, account for linux,usable-memory and
	 * linux,drconf-usable-memory properties. Get an approximate count of
	 * the usable memory entries and use it for FDT size estimation.
	 */
	if (drmem_lmb_size()) {
		usm_entries = ((memory_hotplug_max() / drmem_lmb_size()) +
			       (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
		extra_size += (unsigned int)(usm_entries * sizeof(u64));
	}

#ifdef CONFIG_CRASH_HOTPLUG
	/*
	 * Make sure enough space is reserved to accommodate possible CPU nodes
	 * in the crash FDT. This allows packing possible CPU nodes which are
	 * not yet present in the system without regenerating the entire FDT.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		possible_cpu_nodes = num_possible_cpus() / threads_per_core;
		if (possible_cpu_nodes > cpu_nodes)
			extra_size += (possible_cpu_nodes - cpu_nodes) * cpu_node_size();
	}
#endif

	return extra_size;
}

/**
 * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to
 *                              setup FDT for kexec/kdump kernel.
 * @image:                      kexec image being loaded.
 * @rmem:                       Reserved memory ranges.
 *
 * Returns the estimated extra size needed for kexec/kdump kernel FDT.
 */
unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image, struct crash_mem *rmem)
{
	struct device_node *dn;
	unsigned int cpu_nodes = 0, extra_size = 0;

	// Budget some space for the password blob. There's already extra space
	// for the key name
	if (plpks_is_available())
		extra_size += (unsigned int)plpks_get_passwordlen();

	/* Get the number of CPU nodes in the current device tree */
	for_each_node_by_type(dn, "cpu") {
		cpu_nodes++;
	}

	/* Consider extra space for CPU nodes added since the boot time */
	if (cpu_nodes > boot_cpu_node_count)
		extra_size += (cpu_nodes - boot_cpu_node_count) * cpu_node_size();

	/* Consider extra space for reserved memory ranges if any */
	if (rmem->nr_ranges > 0)
		extra_size += sizeof(struct fdt_reserve_entry) * rmem->nr_ranges;

	return extra_size + kdump_extra_fdt_size_ppc64(image, cpu_nodes);
}

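/*
 * Copy @propname from the live device tree node @dn to the corresponding
 * node at @node_offset in the new kernel's fdt. If the property no longer
 * exists in the live tree, delete it from the fdt instead.
 */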
static int copy_property(void *fdt, int node_offset, const struct device_node *dn,
			 const char *propname)
{
	const void *prop, *fdtprop;
	int len = 0, fdtlen = 0;

	prop = of_get_property(dn, propname, &len);
	fdtprop = fdt_getprop(fdt, node_offset, propname, &fdtlen);

	if (fdtprop && !prop)
		return fdt_delprop(fdt, node_offset, propname);
	else if (prop)
		return fdt_setprop(fdt, node_offset, propname, prop, len);
	else
		return -FDT_ERR_NOTFOUND;
}

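/*
 * On LPARs, refresh the DMA window properties (ibm,dma-window and
 * @dmapropname) of every PCI node carrying @dmapropname in the new
 * kernel's fdt, so they match the live device tree.
 */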
static int update_pci_dma_nodes(void *fdt, const char *dmapropname)
{
	struct device_node *dn;
	int pci_offset, root_offset, ret = 0;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	root_offset = fdt_path_offset(fdt, "/");
	for_each_node_with_property(dn, dmapropname) {
		pci_offset = fdt_subnode_offset(fdt, root_offset, of_node_full_name(dn));
		if (pci_offset < 0)
			continue;

		ret = copy_property(fdt, pci_offset, dn, "ibm,dma-window");
		if (ret < 0) {
			of_node_put(dn);
			break;
		}
		ret = copy_property(fdt, pci_offset, dn, dmapropname);
		if (ret < 0) {
			of_node_put(dn);
			break;
		}
	}

	return ret;
}

/**
 * setup_new_fdt_ppc64 - Update the flattened device-tree of the kernel
 *                       being loaded.
 * @image:               kexec image being loaded.
 * @fdt:                 Flattened device tree for the next kernel.
 * @rmem:                Reserved memory ranges.
 *
 * Returns 0 on success, negative errno on error.
 */
int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, struct crash_mem *rmem)
{
	struct crash_mem *umem = NULL;
	int i, nr_ranges, ret;

#ifdef CONFIG_CRASH_DUMP
	/*
	 * Restrict memory usage for kdump kernel by setting up
	 * usable memory ranges and memory reserve map.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = get_usable_memory_ranges(&umem);
		if (ret)
			goto out;

		ret = update_usable_mem_fdt(fdt, umem);
		if (ret) {
			pr_err("Error setting up usable-memory property for kdump kernel\n");
			goto out;
		}

		/*
		 * Ensure we don't touch crashed kernel's memory except the
		 * first 64K of RAM, which will be backed up.
		 */
		ret = fdt_add_mem_rsv(fdt, BACKUP_SRC_END + 1,
				      crashk_res.start - BACKUP_SRC_SIZE);
		if (ret) {
			pr_err("Error reserving crash memory: %s\n",
			       fdt_strerror(ret));
			goto out;
		}

		/* Ensure backup region is not used by kdump/capture kernel */
		ret = fdt_add_mem_rsv(fdt, image->arch.backup_start,
				      BACKUP_SRC_SIZE);
		if (ret) {
			pr_err("Error reserving memory for backup: %s\n",
			       fdt_strerror(ret));
			goto out;
		}
	}
#endif

	/* Update cpus nodes information to account for hotplug CPUs. */
	ret = update_cpus_node(fdt);
	if (ret < 0)
		goto out;

	ret = update_pci_dma_nodes(fdt, DIRECT64_PROPNAME);
	if (ret < 0)
		goto out;

	ret = update_pci_dma_nodes(fdt, DMA64_PROPNAME);
	if (ret < 0)
		goto out;

	/* Update memory reserve map */
	nr_ranges = rmem ? rmem->nr_ranges : 0;
	for (i = 0; i < nr_ranges; i++) {
		u64 base, size;

		base = rmem->ranges[i].start;
		size = rmem->ranges[i].end - base + 1;
		ret = fdt_add_mem_rsv(fdt, base, size);
		if (ret) {
			pr_err("Error updating memory reserve map: %s\n",
			       fdt_strerror(ret));
			goto out;
		}
	}

	// If we have PLPKS active, we need to provide the password to the new kernel
	if (plpks_is_available())
		ret = plpks_populate_fdt(fdt);

out:
	kfree(umem);
	return ret;
}

/**
 * arch_kexec_kernel_image_probe - Does additional handling needed to setup
 *                                 kexec segments.
 * @image:                         kexec image being loaded.
 * @buf:                           Buffer pointing to elf data.
 * @buf_len:                       Length of the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
{
	int ret;

	/* Get exclude memory ranges needed for setting up kexec segments */
	ret = get_exclude_memory_ranges(&(image->arch.exclude_ranges));
	if (ret) {
		pr_err("Failed to setup exclude memory ranges for buffer lookup\n");
		return ret;
	}

	return kexec_image_probe_default(image, buf, buf_len);
}

/**
 * arch_kimage_file_post_load_cleanup - Frees up all the allocations done
 *                                      while loading the image.
 * @image:                              kexec image being loaded.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	kfree(image->arch.exclude_ranges);
	image->arch.exclude_ranges = NULL;

	vfree(image->arch.backup_buf);
	image->arch.backup_buf = NULL;

	vfree(image->elf_headers);
	image->elf_headers = NULL;
	image->elf_headers_sz = 0;

	kvfree(image->arch.fdt);
	image->arch.fdt = NULL;

	return kexec_image_post_load_cleanup_default(image);
}
857