xref: /linux/kernel/liveupdate/kexec_handover.c (revision 6b0dd42d7681af148e13df3806f251bc3dc7c36e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * kexec_handover.c - kexec handover metadata processing
4  * Copyright (C) 2023 Alexander Graf <graf@amazon.com>
5  * Copyright (C) 2025 Microsoft Corporation, Mike Rapoport <rppt@kernel.org>
6  * Copyright (C) 2025 Google LLC, Changyuan Lyu <changyuanl@google.com>
7  * Copyright (C) 2025 Pasha Tatashin <pasha.tatashin@soleen.com>
8  * Copyright (C) 2026 Google LLC, Jason Miu <jasonmiu@google.com>
9  */
10 
11 #define pr_fmt(fmt) "KHO: " fmt
12 
13 #include <linux/cleanup.h>
14 #include <linux/cma.h>
15 #include <linux/kmemleak.h>
16 #include <linux/count_zeros.h>
17 #include <linux/kexec.h>
18 #include <linux/kexec_handover.h>
19 #include <linux/kho_radix_tree.h>
20 #include <linux/kho/abi/kexec_handover.h>
21 #include <linux/libfdt.h>
22 #include <linux/list.h>
23 #include <linux/memblock.h>
24 #include <linux/page-isolation.h>
25 #include <linux/unaligned.h>
26 #include <linux/vmalloc.h>
27 
28 #include <asm/early_ioremap.h>
29 
30 /*
31  * KHO is tightly coupled with mm init and needs access to some of the
32  * mm-internal APIs.
33  */
34 #include "../../mm/internal.h"
35 #include "../kexec_internal.h"
36 #include "kexec_handover_internal.h"
37 
38 /* The magic token for preserved pages */
39 #define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */
40 
41 /*
42  * KHO uses page->private, which is an unsigned long, to store page metadata.
43  * Use it to store both the magic and the order.
44  */
45 union kho_page_info {
46 	unsigned long page_private;
47 	struct {
48 		unsigned int order;
49 		unsigned int magic;
50 	};
51 };
52 
53 static_assert(sizeof(union kho_page_info) == sizeof(((struct page *)0)->private));
54 
55 static bool kho_enable __ro_after_init = IS_ENABLED(CONFIG_KEXEC_HANDOVER_ENABLE_DEFAULT);
56 
57 bool kho_is_enabled(void)
58 {
59 	return kho_enable;
60 }
61 EXPORT_SYMBOL_GPL(kho_is_enabled);
62 
63 static int __init kho_parse_enable(char *p)
64 {
65 	return kstrtobool(p, &kho_enable);
66 }
67 early_param("kho", kho_parse_enable);
68 
69 struct kho_out {
70 	void *fdt;
71 	struct mutex lock; /* protects KHO FDT */
72 
73 	struct kho_radix_tree radix_tree;
74 	struct kho_debugfs dbg;
75 };
76 
77 static struct kho_out kho_out = {
78 	.lock = __MUTEX_INITIALIZER(kho_out.lock),
79 	.radix_tree = {
80 		.lock = __MUTEX_INITIALIZER(kho_out.radix_tree.lock),
81 	},
82 };
83 
84 /**
85  * kho_radix_encode_key - Encodes a physical address and order into a radix key.
86  * @phys: The physical address of the page.
87  * @order: The order of the page.
88  *
89  * This function combines a page's physical address and its order into a
90  * single unsigned long, which is used as a key for all radix tree
91  * operations.
92  *
93  * Return: The encoded unsigned long radix key.
94  */
95 static unsigned long kho_radix_encode_key(phys_addr_t phys, unsigned int order)
96 {
97 	/* Order bits part */
98 	unsigned long h = 1UL << (KHO_ORDER_0_LOG2 - order);
99 	/* Shifted physical address part */
100 	unsigned long l = phys >> (PAGE_SHIFT + order);
101 
102 	return h | l;
103 }
104 
105 /**
106  * kho_radix_decode_key - Decodes a radix key back into a physical address and order.
107  * @key: The unsigned long key to decode.
108  * @order: An output parameter, a pointer to an unsigned int where the decoded
109  *         page order will be stored.
110  *
111  * This function reverses the encoding performed by kho_radix_encode_key(),
112  * extracting the original physical address and page order from a given key.
113  *
114  * Return: The decoded physical address.
115  */
116 static phys_addr_t kho_radix_decode_key(unsigned long key, unsigned int *order)
117 {
118 	unsigned int order_bit = fls64(key);
119 	phys_addr_t phys;
120 
121 	/* order_bit is numbered starting at 1 from fls64 */
122 	*order = KHO_ORDER_0_LOG2 - order_bit + 1;
123 	/* The order is discarded by the shift */
124 	phys = key << (PAGE_SHIFT + *order);
125 
126 	return phys;
127 }
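
/*
 * Worked example (illustrative only; it assumes KHO_ORDER_0_LOG2 == 63 and
 * PAGE_SHIFT == 12, values which are not taken from this file): encoding an
 * order-9 page at phys 0x40000000 gives
 *
 *	h   = 1UL << (63 - 9)        = 1UL << 54
 *	l   = 0x40000000 >> (12 + 9) = 0x200
 *	key = h | l                  = (1UL << 54) | 0x200
 *
 * Decoding the key: fls64(key) == 55, so order = 63 - 55 + 1 = 9, and
 * key << (12 + 9) shifts the marker bit out of the 64-bit value, leaving
 * phys = 0x200 << 21 = 0x40000000 again.
 */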
128 
129 static unsigned long kho_radix_get_bitmap_index(unsigned long key)
130 {
131 	return key % (1 << KHO_BITMAP_SIZE_LOG2);
132 }
133 
134 static unsigned long kho_radix_get_table_index(unsigned long key,
135 					       unsigned int level)
136 {
137 	int s;
138 
139 	s = ((level - 1) * KHO_TABLE_SIZE_LOG2) + KHO_BITMAP_SIZE_LOG2;
140 	return (key >> s) % (1 << KHO_TABLE_SIZE_LOG2);
141 }
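
/*
 * Illustration (assuming 4 KiB pages, i.e. KHO_BITMAP_SIZE_LOG2 == 15 for the
 * 32768-bit leaf bitmap and KHO_TABLE_SIZE_LOG2 == 9 for the 512-slot tables):
 * for key 0x12345678,
 *
 *	bitmap index         = 0x12345678 % (1 << 15)        = 0x5678
 *	table index, level 1 = (0x12345678 >> 15) % (1 << 9)  = 0x68
 *	table index, level 2 = (0x12345678 >> 24) % (1 << 9)  = 0x12
 *
 * and so on for higher levels.
 */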
142 
143 /**
144  * kho_radix_add_page - Marks a page as preserved in the radix tree.
145  * @tree: The KHO radix tree.
146  * @pfn: The page frame number of the page to preserve.
147  * @order: The order of the page.
148  *
149  * This function traverses the radix tree based on the key derived from @pfn
150  * and @order. It sets the corresponding bit in the leaf bitmap to mark the
151  * page for preservation. If intermediate nodes do not exist along the path,
152  * they are allocated and added to the tree.
153  *
154  * Return: 0 on success, or a negative error code on failure.
155  */
156 int kho_radix_add_page(struct kho_radix_tree *tree,
157 		       unsigned long pfn, unsigned int order)
158 {
159 	/* Newly allocated nodes for error cleanup */
160 	struct kho_radix_node *intermediate_nodes[KHO_TREE_MAX_DEPTH] = { 0 };
161 	unsigned long key = kho_radix_encode_key(PFN_PHYS(pfn), order);
162 	struct kho_radix_node *anchor_node = NULL;
163 	struct kho_radix_node *node = tree->root;
164 	struct kho_radix_node *new_node;
165 	unsigned int i, idx, anchor_idx;
166 	struct kho_radix_leaf *leaf;
167 	int err = 0;
168 
169 	if (WARN_ON_ONCE(!tree->root))
170 		return -EINVAL;
171 
172 	might_sleep();
173 
174 	guard(mutex)(&tree->lock);
175 
176 	/* Go from high levels to low levels */
177 	for (i = KHO_TREE_MAX_DEPTH - 1; i > 0; i--) {
178 		idx = kho_radix_get_table_index(key, i);
179 
180 		if (node->table[idx]) {
181 			node = phys_to_virt(node->table[idx]);
182 			continue;
183 		}
184 
185 		/* Next node is empty, create a new node for it */
186 		new_node = (struct kho_radix_node *)get_zeroed_page(GFP_KERNEL);
187 		if (!new_node) {
188 			err = -ENOMEM;
189 			goto err_free_nodes;
190 		}
191 
192 		node->table[idx] = virt_to_phys(new_node);
193 
194 		/*
195 		 * Capture the node where the new branch starts for cleanup
196 		 * if allocation fails.
197 		 */
198 		if (!anchor_node) {
199 			anchor_node = node;
200 			anchor_idx = idx;
201 		}
202 		intermediate_nodes[i] = new_node;
203 
204 		node = new_node;
205 	}
206 
207 	/* Handle the leaf level bitmap (level 0) */
208 	idx = kho_radix_get_bitmap_index(key);
209 	leaf = (struct kho_radix_leaf *)node;
210 	__set_bit(idx, leaf->bitmap);
211 
212 	return 0;
213 
214 err_free_nodes:
215 	for (i = KHO_TREE_MAX_DEPTH - 1; i > 0; i--) {
216 		if (intermediate_nodes[i])
217 			free_page((unsigned long)intermediate_nodes[i]);
218 	}
219 	if (anchor_node)
220 		anchor_node->table[anchor_idx] = 0;
221 
222 	return err;
223 }
224 EXPORT_SYMBOL_GPL(kho_radix_add_page);
225 
226 /**
227  * kho_radix_del_page - Removes a page's preservation status from the radix tree.
228  * @tree: The KHO radix tree.
229  * @pfn: The page frame number of the page to unpreserve.
230  * @order: The order of the page.
231  *
232  * This function traverses the radix tree and clears the bit corresponding to
233  * the page, effectively removing its "preserved" status. It does not free
234  * the tree's intermediate nodes, even if they become empty.
235  */
236 void kho_radix_del_page(struct kho_radix_tree *tree, unsigned long pfn,
237 			unsigned int order)
238 {
239 	unsigned long key = kho_radix_encode_key(PFN_PHYS(pfn), order);
240 	struct kho_radix_node *node = tree->root;
241 	struct kho_radix_leaf *leaf;
242 	unsigned int i, idx;
243 
244 	if (WARN_ON_ONCE(!tree->root))
245 		return;
246 
247 	might_sleep();
248 
249 	guard(mutex)(&tree->lock);
250 
251 	/* Go from high levels to low levels */
252 	for (i = KHO_TREE_MAX_DEPTH - 1; i > 0; i--) {
253 		idx = kho_radix_get_table_index(key, i);
254 
255 		/*
256 		 * Attempting to delete a page that has not been preserved:
257 		 * warn and return.
258 		 */
259 		if (WARN_ON(!node->table[idx]))
260 			return;
261 
262 		node = phys_to_virt(node->table[idx]);
263 	}
264 
265 	/* Handle the leaf level bitmap (level 0) */
266 	leaf = (struct kho_radix_leaf *)node;
267 	idx = kho_radix_get_bitmap_index(key);
268 	__clear_bit(idx, leaf->bitmap);
269 }
270 EXPORT_SYMBOL_GPL(kho_radix_del_page);
271 
272 static int kho_radix_walk_leaf(struct kho_radix_leaf *leaf,
273 			       unsigned long key,
274 			       kho_radix_tree_walk_callback_t cb)
275 {
276 	unsigned long *bitmap = (unsigned long *)leaf;
277 	unsigned int order;
278 	phys_addr_t phys;
279 	unsigned int i;
280 	int err;
281 
282 	for_each_set_bit(i, bitmap, PAGE_SIZE * BITS_PER_BYTE) {
283 		phys = kho_radix_decode_key(key | i, &order);
284 		err = cb(phys, order);
285 		if (err)
286 			return err;
287 	}
288 
289 	return 0;
290 }
291 
292 static int __kho_radix_walk_tree(struct kho_radix_node *root,
293 				 unsigned int level, unsigned long start,
294 				 kho_radix_tree_walk_callback_t cb)
295 {
296 	struct kho_radix_node *node;
297 	struct kho_radix_leaf *leaf;
298 	unsigned long key, i;
299 	unsigned int shift;
300 	int err;
301 
302 	for (i = 0; i < PAGE_SIZE / sizeof(phys_addr_t); i++) {
303 		if (!root->table[i])
304 			continue;
305 
306 		shift = ((level - 1) * KHO_TABLE_SIZE_LOG2) +
307 			KHO_BITMAP_SIZE_LOG2;
308 		key = start | (i << shift);
309 
310 		node = phys_to_virt(root->table[i]);
311 
312 		if (level == 1) {
313 			/*
314 			 * We are at level 1, so node points to the
315 			 * level 0 bitmap.
316 			 */
317 			leaf = (struct kho_radix_leaf *)node;
318 			err = kho_radix_walk_leaf(leaf, key, cb);
319 		} else {
320 			err = __kho_radix_walk_tree(node, level - 1,
321 						    key, cb);
322 		}
323 
324 		if (err)
325 			return err;
326 	}
327 
328 	return 0;
329 }
330 
331 /**
332  * kho_radix_walk_tree - Traverses the radix tree and calls a callback for each preserved page.
333  * @tree: A pointer to the KHO radix tree to walk.
334  * @cb: A callback function of type kho_radix_tree_walk_callback_t that will be
335  *      invoked for each preserved page found in the tree. The callback receives
336  *      the physical address and order of the preserved page.
337  *
338  * This function walks the radix tree from the top level down to the leaf
339  * level (level 0). For each preserved page found, it invokes the provided
340  * callback, passing the page's physical address and order.
341  *
342  * Return: 0 if the walk covered the entire tree, or the non-zero return
343  *         value from the callback that stopped the walk.
344  */
345 int kho_radix_walk_tree(struct kho_radix_tree *tree,
346 			kho_radix_tree_walk_callback_t cb)
347 {
348 	if (WARN_ON_ONCE(!tree->root))
349 		return -EINVAL;
350 
351 	guard(mutex)(&tree->lock);
352 
353 	return __kho_radix_walk_tree(tree->root, KHO_TREE_MAX_DEPTH - 1, 0, cb);
354 }
355 EXPORT_SYMBOL_GPL(kho_radix_walk_tree);
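
/*
 * Minimal callback sketch (hypothetical, not part of this file): summing up
 * how much memory a tree marks as preserved. Returning non-zero from the
 * callback stops the walk and is propagated back to the caller.
 *
 *	static u64 preserved_bytes;
 *
 *	static int count_preserved(phys_addr_t phys, unsigned int order)
 *	{
 *		preserved_bytes += PAGE_SIZE << order;
 *		return 0;
 *	}
 *
 *	...
 *	err = kho_radix_walk_tree(tree, count_preserved);
 */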
356 
357 static void __kho_unpreserve(struct kho_radix_tree *tree,
358 			     unsigned long pfn, unsigned long end_pfn)
359 {
360 	unsigned int order;
361 
362 	while (pfn < end_pfn) {
363 		order = min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
364 
365 		kho_radix_del_page(tree, pfn, order);
366 
367 		pfn += 1 << order;
368 	}
369 }
370 
371 /* For physically contiguous 0-order pages. */
372 static void kho_init_pages(struct page *page, unsigned long nr_pages)
373 {
374 	for (unsigned long i = 0; i < nr_pages; i++) {
375 		set_page_count(page + i, 1);
376 		/* Clear each page's codetag to avoid accounting mismatch. */
377 		clear_page_tag_ref(page + i);
378 	}
379 }
380 
381 static void kho_init_folio(struct page *page, unsigned int order)
382 {
383 	unsigned long nr_pages = (1 << order);
384 
385 	/* Head page gets refcount of 1. */
386 	set_page_count(page, 1);
387 	/* Clear head page's codetag to avoid accounting mismatch. */
388 	clear_page_tag_ref(page);
389 
390 	/* For higher order folios, tail pages get a page count of zero. */
391 	for (unsigned long i = 1; i < nr_pages; i++)
392 		set_page_count(page + i, 0);
393 
394 	if (order > 0)
395 		prep_compound_page(page, order);
396 }
397 
398 static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
399 {
400 	struct page *page = pfn_to_online_page(PHYS_PFN(phys));
401 	unsigned long nr_pages;
402 	union kho_page_info info;
403 
404 	if (!page)
405 		return NULL;
406 
407 	info.page_private = page->private;
408 	/*
409 	 * deserialize_bitmap() only sets the magic on the head page. This magic
410 	 * check also implicitly makes sure phys is order-aligned since for
411 	 * non-order-aligned phys addresses, magic will never be set.
412 	 */
413 	if (WARN_ON_ONCE(info.magic != KHO_PAGE_MAGIC || info.order > MAX_PAGE_ORDER))
414 		return NULL;
415 	nr_pages = (1 << info.order);
416 
417 	/* Clear private to make sure later restores on this page error out. */
418 	page->private = 0;
419 
420 	if (is_folio)
421 		kho_init_folio(page, info.order);
422 	else
423 		kho_init_pages(page, nr_pages);
424 
425 	adjust_managed_page_count(page, nr_pages);
426 	return page;
427 }
428 
429 /**
430  * kho_restore_folio - recreates the folio from the preserved memory.
431  * @phys: physical address of the folio.
432  *
433  * Return: pointer to the struct folio on success, NULL on failure.
434  */
435 struct folio *kho_restore_folio(phys_addr_t phys)
436 {
437 	struct page *page = kho_restore_page(phys, true);
438 
439 	return page ? page_folio(page) : NULL;
440 }
441 EXPORT_SYMBOL_GPL(kho_restore_folio);
442 
443 /**
444  * kho_restore_pages - restore a list of contiguous order 0 pages.
445  * @phys: physical address of the first page.
446  * @nr_pages: number of pages.
447  *
448  * Restore a contiguous list of order 0 pages that was preserved with
449  * kho_preserve_pages().
450  *
451  * Return: the first page on success, NULL on failure.
452  */
453 struct page *kho_restore_pages(phys_addr_t phys, unsigned long nr_pages)
454 {
455 	const unsigned long start_pfn = PHYS_PFN(phys);
456 	const unsigned long end_pfn = start_pfn + nr_pages;
457 	unsigned long pfn = start_pfn;
458 
459 	while (pfn < end_pfn) {
460 		const unsigned int order =
461 			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
462 		struct page *page = kho_restore_page(PFN_PHYS(pfn), false);
463 
464 		if (!page)
465 			return NULL;
466 		pfn += 1 << order;
467 	}
468 
469 	return pfn_to_page(start_pfn);
470 }
471 EXPORT_SYMBOL_GPL(kho_restore_pages);
472 
473 static int __init kho_preserved_memory_reserve(phys_addr_t phys,
474 					       unsigned int order)
475 {
476 	union kho_page_info info;
477 	struct page *page;
478 	u64 sz;
479 
480 	sz = 1ULL << (order + PAGE_SHIFT);
481 	page = phys_to_page(phys);
482 
483 	/* Reserve the memory preserved in KHO in memblock */
484 	memblock_reserve(phys, sz);
485 	memblock_reserved_mark_noinit(phys, sz);
486 	info.magic = KHO_PAGE_MAGIC;
487 	info.order = order;
488 	page->private = info.page_private;
489 
490 	return 0;
491 }
492 
493 /* Returns physical address of the preserved memory map from FDT */
494 static phys_addr_t __init kho_get_mem_map_phys(const void *fdt)
495 {
496 	const void *mem_ptr;
497 	int len;
498 
499 	mem_ptr = fdt_getprop(fdt, 0, KHO_FDT_MEMORY_MAP_PROP_NAME, &len);
500 	if (!mem_ptr || len != sizeof(u64)) {
501 		pr_err("failed to get preserved memory map\n");
502 		return 0;
503 	}
504 
505 	return get_unaligned((const u64 *)mem_ptr);
506 }
507 
508 /*
509  * With KHO enabled, memory can become fragmented because KHO regions may
510  * be anywhere in physical address space. The scratch regions give us safe
511  * zones that will never contain KHO allocations. This is where we can
512  * later safely load new kexec images, and the scratch areas also serve
513  * the early allocations that happen before the page allocator is
514  * initialized.
515  */
516 struct kho_scratch *kho_scratch;
517 unsigned int kho_scratch_cnt;
518 
519 /*
520  * The scratch areas are scaled by default as percent of memory allocated from
521  * memblock. A user can override the scale with command line parameter:
522  *
523  * kho_scratch=N%
524  *
525  * It is also possible to explicitly define the sizes of the lowmem, global
526  * and per-node scratch areas:
527  *
528  * kho_scratch=l[KMG],n[KMG],m[KMG]
529  *
530  * The explicit size definition takes precedence over scale definition.
531  */
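
/*
 * Example (illustrative): "kho_scratch=512M,2G,256M" reserves 512 MiB of
 * lowmem scratch, a 2 GiB global scratch area and 256 MiB per memory node,
 * while "kho_scratch=30%" scales all scratch areas relative to the memory
 * reserved from memblock (see scratch_size_update()).
 */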
532 static unsigned int scratch_scale __initdata = 200;
533 static phys_addr_t scratch_size_global __initdata;
534 static phys_addr_t scratch_size_pernode __initdata;
535 static phys_addr_t scratch_size_lowmem __initdata;
536 
537 static int __init kho_parse_scratch_size(char *p)
538 {
539 	size_t len;
540 	unsigned long sizes[3];
541 	size_t total_size = 0;
542 	int i;
543 
544 	if (!p)
545 		return -EINVAL;
546 
547 	len = strlen(p);
548 	if (!len)
549 		return -EINVAL;
550 
551 	/* parse nn% */
552 	if (p[len - 1] == '%') {
553 		/* unsigned int max is 4,294,967,295, 10 chars */
554 		char s_scale[11] = {};
555 		int ret = 0;
556 
557 		if (len > ARRAY_SIZE(s_scale))
558 			return -EINVAL;
559 
560 		memcpy(s_scale, p, len - 1);
561 		ret = kstrtouint(s_scale, 10, &scratch_scale);
562 		if (!ret)
563 			pr_notice("scratch scale is %d%%\n", scratch_scale);
564 		return ret;
565 	}
566 
567 	/* parse ll[KMG],mm[KMG],nn[KMG] */
568 	for (i = 0; i < ARRAY_SIZE(sizes); i++) {
569 		char *endp = p;
570 
571 		if (i > 0) {
572 			if (*p != ',')
573 				return -EINVAL;
574 			p += 1;
575 		}
576 
577 		sizes[i] = memparse(p, &endp);
578 		if (endp == p)
579 			return -EINVAL;
580 		p = endp;
581 		total_size += sizes[i];
582 	}
583 
584 	if (!total_size)
585 		return -EINVAL;
586 
587 	/* The string should be fully consumed by now. */
588 	if (*p)
589 		return -EINVAL;
590 
591 	scratch_size_lowmem = sizes[0];
592 	scratch_size_global = sizes[1];
593 	scratch_size_pernode = sizes[2];
594 	scratch_scale = 0;
595 
596 	pr_notice("scratch areas: lowmem: %lluMiB global: %lluMiB pernode: %lluMiB\n",
597 		  (u64)(scratch_size_lowmem >> 20),
598 		  (u64)(scratch_size_global >> 20),
599 		  (u64)(scratch_size_pernode >> 20));
600 
601 	return 0;
602 }
603 early_param("kho_scratch", kho_parse_scratch_size);
604 
605 static void __init scratch_size_update(void)
606 {
607 	phys_addr_t size;
608 
609 	if (!scratch_scale)
610 		return;
611 
612 	size = memblock_reserved_kern_size(ARCH_LOW_ADDRESS_LIMIT,
613 					   NUMA_NO_NODE);
614 	size = size * scratch_scale / 100;
615 	scratch_size_lowmem = round_up(size, CMA_MIN_ALIGNMENT_BYTES);
616 
617 	size = memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE,
618 					   NUMA_NO_NODE);
619 	size = size * scratch_scale / 100 - scratch_size_lowmem;
620 	scratch_size_global = round_up(size, CMA_MIN_ALIGNMENT_BYTES);
621 }
622 
623 static phys_addr_t __init scratch_size_node(int nid)
624 {
625 	phys_addr_t size;
626 
627 	if (scratch_scale) {
628 		size = memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE,
629 						   nid);
630 		size = size * scratch_scale / 100;
631 	} else {
632 		size = scratch_size_pernode;
633 	}
634 
635 	return round_up(size, CMA_MIN_ALIGNMENT_BYTES);
636 }
637 
638 /**
639  * kho_reserve_scratch - Reserve a contiguous chunk of memory for kexec
640  *
641  * With KHO we can preserve arbitrary pages in the system. To ensure we still
642  * have a large contiguous region of memory when we search the physical address
643  * space for target memory, let's make sure we always have a large CMA region
644  * active. This CMA region will only be used for movable pages which are not a
645  * problem for us during KHO because we can just move them somewhere else.
646  */
647 static void __init kho_reserve_scratch(void)
648 {
649 	phys_addr_t addr, size;
650 	int nid, i = 0;
651 
652 	if (!kho_enable)
653 		return;
654 
655 	scratch_size_update();
656 
657 	/* FIXME: deal with node hot-plug/remove */
658 	kho_scratch_cnt = nodes_weight(node_states[N_MEMORY]) + 2;
659 	size = kho_scratch_cnt * sizeof(*kho_scratch);
660 	kho_scratch = memblock_alloc(size, PAGE_SIZE);
661 	if (!kho_scratch) {
662 		pr_err("Failed to reserve scratch array\n");
663 		goto err_disable_kho;
664 	}
665 
666 	/*
667 	 * reserve scratch area in low memory for lowmem allocations in the
668 	 * next kernel
669 	 */
670 	size = scratch_size_lowmem;
671 	addr = memblock_phys_alloc_range(size, CMA_MIN_ALIGNMENT_BYTES, 0,
672 					 ARCH_LOW_ADDRESS_LIMIT);
673 	if (!addr) {
674 		pr_err("Failed to reserve lowmem scratch buffer\n");
675 		goto err_free_scratch_desc;
676 	}
677 
678 	kho_scratch[i].addr = addr;
679 	kho_scratch[i].size = size;
680 	i++;
681 
682 	/* reserve large contiguous area for allocations without nid */
683 	size = scratch_size_global;
684 	addr = memblock_phys_alloc(size, CMA_MIN_ALIGNMENT_BYTES);
685 	if (!addr) {
686 		pr_err("Failed to reserve global scratch buffer\n");
687 		goto err_free_scratch_areas;
688 	}
689 
690 	kho_scratch[i].addr = addr;
691 	kho_scratch[i].size = size;
692 	i++;
693 
694 	/*
695 	 * Loop over online nodes that have memory. Skip memoryless nodes,
696 	 * as we cannot allocate scratch areas there.
697 	 */
698 	for_each_node_state(nid, N_MEMORY) {
699 		size = scratch_size_node(nid);
700 		addr = memblock_alloc_range_nid(size, CMA_MIN_ALIGNMENT_BYTES,
701 						0, MEMBLOCK_ALLOC_ACCESSIBLE,
702 						nid, true);
703 		if (!addr) {
704 			pr_err("Failed to reserve nid %d scratch buffer\n", nid);
705 			goto err_free_scratch_areas;
706 		}
707 
708 		kho_scratch[i].addr = addr;
709 		kho_scratch[i].size = size;
710 		i++;
711 	}
712 
713 	return;
714 
715 err_free_scratch_areas:
716 	for (i--; i >= 0; i--)
717 		memblock_phys_free(kho_scratch[i].addr, kho_scratch[i].size);
718 err_free_scratch_desc:
719 	memblock_free(kho_scratch, kho_scratch_cnt * sizeof(*kho_scratch));
720 err_disable_kho:
721 	pr_warn("Failed to reserve scratch area, disabling kexec handover\n");
722 	kho_enable = false;
723 }
724 
725 /**
726  * kho_add_subtree - record the physical address of a sub FDT in KHO root tree.
727  * @name: name of the sub tree.
728  * @fdt: the sub tree blob.
729  *
730  * Creates a new child node named @name in the KHO root FDT and records
731  * the physical address of @fdt. The pages of @fdt must also be preserved
732  * by KHO for the new kernel to retrieve it after kexec.
733  *
734  * A debugfs blob entry is also created at
735  * ``/sys/kernel/debug/kho/out/sub_fdts/@name`` when the kernel is configured
736  * with CONFIG_KEXEC_HANDOVER_DEBUGFS.
737  *
738  * Return: 0 on success, error code on failure
739  */
740 int kho_add_subtree(const char *name, void *fdt)
741 {
742 	phys_addr_t phys = virt_to_phys(fdt);
743 	void *root_fdt = kho_out.fdt;
744 	int err = -ENOMEM;
745 	int off, fdt_err;
746 
747 	guard(mutex)(&kho_out.lock);
748 
749 	fdt_err = fdt_open_into(root_fdt, root_fdt, PAGE_SIZE);
750 	if (fdt_err < 0)
751 		return err;
752 
753 	off = fdt_add_subnode(root_fdt, 0, name);
754 	if (off < 0) {
755 		if (off == -FDT_ERR_EXISTS)
756 			err = -EEXIST;
757 		goto out_pack;
758 	}
759 
760 	err = fdt_setprop(root_fdt, off, KHO_FDT_SUB_TREE_PROP_NAME,
761 			  &phys, sizeof(phys));
762 	if (err < 0)
763 		goto out_pack;
764 
765 	WARN_ON_ONCE(kho_debugfs_fdt_add(&kho_out.dbg, name, fdt, false));
766 
767 out_pack:
768 	fdt_pack(root_fdt);
769 
770 	return err;
771 }
772 EXPORT_SYMBOL_GPL(kho_add_subtree);
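
/*
 * Usage sketch (hypothetical caller; the node name "example-subsys", the
 * property "state-pa" and the variable state_pa are invented for
 * illustration): build a small sub FDT in preserved memory and register it
 * under the KHO root so the next kernel can look it up by name.
 *
 *	void *fdt = kho_alloc_preserve(PAGE_SIZE);
 *	int err;
 *
 *	if (IS_ERR(fdt))
 *		return PTR_ERR(fdt);
 *
 *	err = fdt_create_empty_tree(fdt, PAGE_SIZE);
 *	if (!err)
 *		err = fdt_setprop_u64(fdt, 0, "state-pa", state_pa);
 *	if (err) {
 *		kho_unpreserve_free(fdt);
 *		return -EINVAL;
 *	}
 *
 *	err = kho_add_subtree("example-subsys", fdt);
 *	if (err)
 *		kho_unpreserve_free(fdt);
 *	return err;
 */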
773 
774 void kho_remove_subtree(void *fdt)
775 {
776 	phys_addr_t target_phys = virt_to_phys(fdt);
777 	void *root_fdt = kho_out.fdt;
778 	int off;
779 	int err;
780 
781 	guard(mutex)(&kho_out.lock);
782 
783 	err = fdt_open_into(root_fdt, root_fdt, PAGE_SIZE);
784 	if (err < 0)
785 		return;
786 
787 	for (off = fdt_first_subnode(root_fdt, 0); off >= 0;
788 	     off = fdt_next_subnode(root_fdt, off)) {
789 		const u64 *val;
790 		int len;
791 
792 		val = fdt_getprop(root_fdt, off, KHO_FDT_SUB_TREE_PROP_NAME, &len);
793 		if (!val || len != sizeof(phys_addr_t))
794 			continue;
795 
796 		if ((phys_addr_t)*val == target_phys) {
797 			fdt_del_node(root_fdt, off);
798 			kho_debugfs_fdt_remove(&kho_out.dbg, fdt);
799 			break;
800 		}
801 	}
802 
803 	fdt_pack(root_fdt);
804 }
805 EXPORT_SYMBOL_GPL(kho_remove_subtree);
806 
807 /**
808  * kho_preserve_folio - preserve a folio across kexec.
809  * @folio: folio to preserve.
810  *
811  * Instructs KHO to preserve the whole folio across kexec. The order
812  * will be preserved as well.
813  *
814  * Return: 0 on success, error code on failure
815  */
816 int kho_preserve_folio(struct folio *folio)
817 {
818 	struct kho_radix_tree *tree = &kho_out.radix_tree;
819 	const unsigned long pfn = folio_pfn(folio);
820 	const unsigned int order = folio_order(folio);
821 
822 	if (WARN_ON(kho_scratch_overlap(pfn << PAGE_SHIFT, PAGE_SIZE << order)))
823 		return -EINVAL;
824 
825 	return kho_radix_add_page(tree, pfn, order);
826 }
827 EXPORT_SYMBOL_GPL(kho_preserve_folio);
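
/*
 * Round-trip sketch (hypothetical caller): the old kernel preserves a folio
 * and hands its physical address to the next kernel (for example via a
 * property in a preserved sub FDT); after kexec that address is turned back
 * into a usable folio.
 *
 *	// Old kernel:
 *	struct folio *folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 2);
 *
 *	if (!folio)
 *		return -ENOMEM;
 *	err = kho_preserve_folio(folio);
 *	if (err) {
 *		folio_put(folio);
 *		return err;
 *	}
 *	pa = PFN_PHYS(folio_pfn(folio));	// publish pa somewhere preserved
 *
 *	// New kernel:
 *	struct folio *folio = kho_restore_folio(pa);
 *
 *	if (!folio)
 *		return -EINVAL;
 */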
828 
829 /**
830  * kho_unpreserve_folio - unpreserve a folio.
831  * @folio: folio to unpreserve.
832  *
833  * Instructs KHO to unpreserve a folio that was preserved by
834  * kho_preserve_folio() before. The provided @folio (pfn and order)
835  * must exactly match a previously preserved folio.
836  */
837 void kho_unpreserve_folio(struct folio *folio)
838 {
839 	struct kho_radix_tree *tree = &kho_out.radix_tree;
840 	const unsigned long pfn = folio_pfn(folio);
841 	const unsigned int order = folio_order(folio);
842 
843 	kho_radix_del_page(tree, pfn, order);
844 }
845 EXPORT_SYMBOL_GPL(kho_unpreserve_folio);
846 
847 /**
848  * kho_preserve_pages - preserve contiguous pages across kexec
849  * @page: first page in the list.
850  * @nr_pages: number of pages.
851  *
852  * Preserve a contiguous list of order 0 pages. Must be restored using
853  * kho_restore_pages() to ensure the pages are restored properly as order 0.
854  *
855  * Return: 0 on success, error code on failure
856  */
857 int kho_preserve_pages(struct page *page, unsigned long nr_pages)
858 {
859 	struct kho_radix_tree *tree = &kho_out.radix_tree;
860 	const unsigned long start_pfn = page_to_pfn(page);
861 	const unsigned long end_pfn = start_pfn + nr_pages;
862 	unsigned long pfn = start_pfn;
863 	unsigned long failed_pfn = 0;
864 	int err = 0;
865 
866 	if (WARN_ON(kho_scratch_overlap(start_pfn << PAGE_SHIFT,
867 					nr_pages << PAGE_SHIFT))) {
868 		return -EINVAL;
869 	}
870 
871 	while (pfn < end_pfn) {
872 		const unsigned int order =
873 			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
874 
875 		err = kho_radix_add_page(tree, pfn, order);
876 		if (err) {
877 			failed_pfn = pfn;
878 			break;
879 		}
880 
881 		pfn += 1 << order;
882 	}
883 
884 	if (err)
885 		__kho_unpreserve(tree, start_pfn, failed_pfn);
886 
887 	return err;
888 }
889 EXPORT_SYMBOL_GPL(kho_preserve_pages);
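
/*
 * Pairing sketch (hypothetical): a physically contiguous buffer of nr
 * order-0 pages is preserved before kexec and restored afterwards with the
 * matching call, using the same base address and page count.
 *
 *	// Old kernel:
 *	err = kho_preserve_pages(virt_to_page(buf), nr);
 *	pa = virt_to_phys(buf);			// publish pa somewhere preserved
 *
 *	// New kernel:
 *	struct page *page = kho_restore_pages(pa, nr);
 */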
890 
891 /**
892  * kho_unpreserve_pages - unpreserve contiguous pages.
893  * @page: first page in the list.
894  * @nr_pages: number of pages.
895  *
896  * Instructs KHO to unpreserve @nr_pages contiguous pages starting from @page.
897  * This must be called with the same @page and @nr_pages as the corresponding
898  * kho_preserve_pages() call. Unpreserving arbitrary sub-ranges of larger
899  * preserved blocks is not supported.
900  */
901 void kho_unpreserve_pages(struct page *page, unsigned long nr_pages)
902 {
903 	struct kho_radix_tree *tree = &kho_out.radix_tree;
904 	const unsigned long start_pfn = page_to_pfn(page);
905 	const unsigned long end_pfn = start_pfn + nr_pages;
906 
907 	__kho_unpreserve(tree, start_pfn, end_pfn);
908 }
909 EXPORT_SYMBOL_GPL(kho_unpreserve_pages);
910 
911 /* vmalloc flags KHO supports */
912 #define KHO_VMALLOC_SUPPORTED_FLAGS	(VM_ALLOC | VM_ALLOW_HUGE_VMAP)
913 
914 /* KHO internal flags for vmalloc preservations */
915 #define KHO_VMALLOC_ALLOC	0x0001
916 #define KHO_VMALLOC_HUGE_VMAP	0x0002
917 
918 static unsigned short vmalloc_flags_to_kho(unsigned int vm_flags)
919 {
920 	unsigned short kho_flags = 0;
921 
922 	if (vm_flags & VM_ALLOC)
923 		kho_flags |= KHO_VMALLOC_ALLOC;
924 	if (vm_flags & VM_ALLOW_HUGE_VMAP)
925 		kho_flags |= KHO_VMALLOC_HUGE_VMAP;
926 
927 	return kho_flags;
928 }
929 
930 static unsigned int kho_flags_to_vmalloc(unsigned short kho_flags)
931 {
932 	unsigned int vm_flags = 0;
933 
934 	if (kho_flags & KHO_VMALLOC_ALLOC)
935 		vm_flags |= VM_ALLOC;
936 	if (kho_flags & KHO_VMALLOC_HUGE_VMAP)
937 		vm_flags |= VM_ALLOW_HUGE_VMAP;
938 
939 	return vm_flags;
940 }
941 
942 static struct kho_vmalloc_chunk *new_vmalloc_chunk(struct kho_vmalloc_chunk *cur)
943 {
944 	struct kho_vmalloc_chunk *chunk;
945 	int err;
946 
947 	chunk = (struct kho_vmalloc_chunk *)get_zeroed_page(GFP_KERNEL);
948 	if (!chunk)
949 		return NULL;
950 
951 	err = kho_preserve_pages(virt_to_page(chunk), 1);
952 	if (err)
953 		goto err_free;
954 	if (cur)
955 		KHOSER_STORE_PTR(cur->hdr.next, chunk);
956 	return chunk;
957 
958 err_free:
959 	free_page((unsigned long)chunk);
960 	return NULL;
961 }
962 
963 static void kho_vmalloc_unpreserve_chunk(struct kho_vmalloc_chunk *chunk,
964 					 unsigned short order)
965 {
966 	struct kho_radix_tree *tree = &kho_out.radix_tree;
967 	unsigned long pfn = PHYS_PFN(virt_to_phys(chunk));
968 
969 	__kho_unpreserve(tree, pfn, pfn + 1);
970 
971 	for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
972 		pfn = PHYS_PFN(chunk->phys[i]);
973 		__kho_unpreserve(tree, pfn, pfn + (1 << order));
974 	}
975 }
976 
977 /**
978  * kho_preserve_vmalloc - preserve memory allocated with vmalloc() across kexec
979  * @ptr: pointer to the area in vmalloc address space
980  * @preservation: placeholder for preservation metadata
981  *
982  * Instructs KHO to preserve the area in vmalloc address space at @ptr. The
983  * physical pages mapped at @ptr will be preserved and on successful return
984  * @preservation will hold the physical address of a structure that describes
985  * the preservation.
986  *
987  * NOTE: Memory allocated with the vmalloc_node() variants cannot be reliably
988  * restored on the same node.
989  *
990  * Return: 0 on success, error code on failure
991  */
992 int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation)
993 {
994 	struct kho_vmalloc_chunk *chunk;
995 	struct vm_struct *vm = find_vm_area(ptr);
996 	unsigned int order, flags, nr_contig_pages;
997 	unsigned int idx = 0;
998 	int err;
999 
1000 	if (!vm)
1001 		return -EINVAL;
1002 
1003 	if (vm->flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
1004 		return -EOPNOTSUPP;
1005 
1006 	flags = vmalloc_flags_to_kho(vm->flags);
1007 	order = get_vm_area_page_order(vm);
1008 
1009 	chunk = new_vmalloc_chunk(NULL);
1010 	if (!chunk)
1011 		return -ENOMEM;
1012 	KHOSER_STORE_PTR(preservation->first, chunk);
1013 
1014 	nr_contig_pages = (1 << order);
1015 	for (int i = 0; i < vm->nr_pages; i += nr_contig_pages) {
1016 		phys_addr_t phys = page_to_phys(vm->pages[i]);
1017 
1018 		err = kho_preserve_pages(vm->pages[i], nr_contig_pages);
1019 		if (err)
1020 			goto err_free;
1021 
1022 		chunk->phys[idx++] = phys;
1023 		if (idx == ARRAY_SIZE(chunk->phys)) {
1024 			chunk = new_vmalloc_chunk(chunk);
1025 			if (!chunk) {
1026 				err = -ENOMEM;
1027 				goto err_free;
1028 			}
1029 			idx = 0;
1030 		}
1031 	}
1032 
1033 	preservation->total_pages = vm->nr_pages;
1034 	preservation->flags = flags;
1035 	preservation->order = order;
1036 
1037 	return 0;
1038 
1039 err_free:
1040 	kho_unpreserve_vmalloc(preservation);
1041 	return err;
1042 }
1043 EXPORT_SYMBOL_GPL(kho_preserve_vmalloc);
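
/*
 * Usage sketch (hypothetical): preserve a vmalloc() buffer and stash the
 * struct kho_vmalloc descriptor somewhere the next kernel can find it, for
 * example as a property of a preserved sub FDT; after kexec the descriptor
 * is handed to kho_restore_vmalloc() to get the mapping back.
 *
 *	struct kho_vmalloc pres;
 *	void *buf = vmalloc(SZ_1M);
 *
 *	if (!buf || kho_preserve_vmalloc(buf, &pres))
 *		goto fail;
 *	// copy 'pres' into preserved memory
 *
 *	// New kernel:
 *	void *buf = kho_restore_vmalloc(&pres);
 */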
1044 
1045 /**
1046  * kho_unpreserve_vmalloc - unpreserve memory allocated with vmalloc()
1047  * @preservation: preservation metadata returned by kho_preserve_vmalloc()
1048  *
1049  * Instructs KHO to unpreserve the area in vmalloc address space that was
1050  * previously preserved with kho_preserve_vmalloc().
1051  */
1052 void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation)
1053 {
1054 	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first);
1055 
1056 	while (chunk) {
1057 		struct kho_vmalloc_chunk *tmp = chunk;
1058 
1059 		kho_vmalloc_unpreserve_chunk(chunk, preservation->order);
1060 
1061 		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
1062 		free_page((unsigned long)tmp);
1063 	}
1064 }
1065 EXPORT_SYMBOL_GPL(kho_unpreserve_vmalloc);
1066 
1067 /**
1068  * kho_restore_vmalloc - recreates and populates an area in vmalloc address
1069  * space from the preserved memory.
1070  * @preservation: preservation metadata.
1071  *
1072  * Recreates an area in vmalloc address space and populates it with memory that
1073  * was preserved using kho_preserve_vmalloc().
1074  *
1075  * Return: pointer to the area in the vmalloc address space, NULL on failure.
1076  */
1077 void *kho_restore_vmalloc(const struct kho_vmalloc *preservation)
1078 {
1079 	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first);
1080 	unsigned int align, order, shift, vm_flags;
1081 	unsigned long total_pages, contig_pages;
1082 	unsigned long addr, size;
1083 	struct vm_struct *area;
1084 	struct page **pages;
1085 	unsigned int idx = 0;
1086 	int err;
1087 
1088 	vm_flags = kho_flags_to_vmalloc(preservation->flags);
1089 	if (vm_flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
1090 		return NULL;
1091 
1092 	total_pages = preservation->total_pages;
1093 	pages = kvmalloc_objs(*pages, total_pages);
1094 	if (!pages)
1095 		return NULL;
1096 	order = preservation->order;
1097 	contig_pages = (1 << order);
1098 	shift = PAGE_SHIFT + order;
1099 	align = 1 << shift;
1100 
1101 	while (chunk) {
1102 		struct page *page;
1103 
1104 		for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
1105 			phys_addr_t phys = chunk->phys[i];
1106 
1107 			if (idx + contig_pages > total_pages)
1108 				goto err_free_pages_array;
1109 
1110 			page = kho_restore_pages(phys, contig_pages);
1111 			if (!page)
1112 				goto err_free_pages_array;
1113 
1114 			for (int j = 0; j < contig_pages; j++)
1115 				pages[idx++] = page + j;
1116 
1117 			phys += contig_pages * PAGE_SIZE;
1118 		}
1119 
1120 		page = kho_restore_pages(virt_to_phys(chunk), 1);
1121 		if (!page)
1122 			goto err_free_pages_array;
1123 		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
1124 		__free_page(page);
1125 	}
1126 
1127 	if (idx != total_pages)
1128 		goto err_free_pages_array;
1129 
1130 	area = __get_vm_area_node(total_pages * PAGE_SIZE, align, shift,
1131 				  vm_flags, VMALLOC_START, VMALLOC_END,
1132 				  NUMA_NO_NODE, GFP_KERNEL,
1133 				  __builtin_return_address(0));
1134 	if (!area)
1135 		goto err_free_pages_array;
1136 
1137 	addr = (unsigned long)area->addr;
1138 	size = get_vm_area_size(area);
1139 	err = vmap_pages_range(addr, addr + size, PAGE_KERNEL, pages, shift);
1140 	if (err)
1141 		goto err_free_vm_area;
1142 
1143 	area->nr_pages = total_pages;
1144 	area->pages = pages;
1145 
1146 	return area->addr;
1147 
1148 err_free_vm_area:
1149 	free_vm_area(area);
1150 err_free_pages_array:
1151 	kvfree(pages);
1152 	return NULL;
1153 }
1154 EXPORT_SYMBOL_GPL(kho_restore_vmalloc);
1155 
1156 /**
1157  * kho_alloc_preserve - Allocate, zero, and preserve memory.
1158  * @size: The number of bytes to allocate.
1159  *
1160  * Allocates a physically contiguous block of zeroed pages that is large
1161  * enough to hold @size bytes. The allocated memory is then registered with
1162  * KHO for preservation across a kexec.
1163  *
1164  * Note: The actual allocated size will be rounded up to the nearest
1165  * power-of-two page boundary.
1166  *
1167  * Return: A virtual pointer to the allocated and preserved memory on success,
1168  *         or an ERR_PTR() encoded error on failure.
1169  */
1170 void *kho_alloc_preserve(size_t size)
1171 {
1172 	struct folio *folio;
1173 	int order, ret;
1174 
1175 	if (!size)
1176 		return ERR_PTR(-EINVAL);
1177 
1178 	order = get_order(size);
1179 	if (order > MAX_PAGE_ORDER)
1180 		return ERR_PTR(-E2BIG);
1181 
1182 	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, order);
1183 	if (!folio)
1184 		return ERR_PTR(-ENOMEM);
1185 
1186 	ret = kho_preserve_folio(folio);
1187 	if (ret) {
1188 		folio_put(folio);
1189 		return ERR_PTR(ret);
1190 	}
1191 
1192 	return folio_address(folio);
1193 }
1194 EXPORT_SYMBOL_GPL(kho_alloc_preserve);
1195 
1196 /**
1197  * kho_unpreserve_free - Unpreserve and free memory.
1198  * @mem:  Pointer to the memory allocated by kho_alloc_preserve().
1199  *
1200  * Unregisters the memory from KHO preservation and frees the underlying
1201  * pages back to the system. This function should be called to clean up
1202  * memory allocated with kho_alloc_preserve().
1203  */
1204 void kho_unpreserve_free(void *mem)
1205 {
1206 	struct folio *folio;
1207 
1208 	if (!mem)
1209 		return;
1210 
1211 	folio = virt_to_folio(mem);
1212 	kho_unpreserve_folio(folio);
1213 	folio_put(folio);
1214 }
1215 EXPORT_SYMBOL_GPL(kho_unpreserve_free);
1216 
1217 /**
1218  * kho_restore_free - Restore and free memory after kexec.
1219  * @mem:  Pointer to the memory (in the new kernel's address space)
1220  * that was allocated by the old kernel.
1221  *
1222  * This function is intended to be called in the new kernel (post-kexec)
1223  * to take ownership of and free a memory region that was preserved by the
1224  * old kernel using kho_alloc_preserve().
1225  *
1226  * It first restores the pages from KHO (using their physical address)
1227  * and then frees the pages back to the new kernel's page allocator.
1228  */
1229 void kho_restore_free(void *mem)
1230 {
1231 	struct folio *folio;
1232 
1233 	if (!mem)
1234 		return;
1235 
1236 	folio = kho_restore_folio(__pa(mem));
1237 	if (!WARN_ON(!folio))
1238 		folio_put(folio);
1239 }
1240 EXPORT_SYMBOL_GPL(kho_restore_free);
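
/*
 * Lifecycle sketch (hypothetical; struct my_state and the variable pa are
 * invented for illustration): a subsystem allocates a preserved buffer
 * before kexec and either aborts the handover or picks the buffer up in
 * the next kernel.
 *
 *	// Old kernel:
 *	struct my_state *st = kho_alloc_preserve(sizeof(*st));
 *
 *	if (IS_ERR(st))
 *		return PTR_ERR(st);
 *	...
 *	kho_unpreserve_free(st);	// abort: unpreserve and free
 *
 *	// New kernel, with 'pa' read from the handed-over metadata:
 *	struct my_state *st = phys_to_virt(pa);
 *	...
 *	kho_restore_free(st);		// done with the handed-over data
 */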
1241 
1242 struct kho_in {
1243 	phys_addr_t fdt_phys;
1244 	phys_addr_t scratch_phys;
1245 	struct kho_debugfs dbg;
1246 };
1247 
1248 static struct kho_in kho_in = {
1249 };
1250 
1251 static const void *kho_get_fdt(void)
1252 {
1253 	return kho_in.fdt_phys ? phys_to_virt(kho_in.fdt_phys) : NULL;
1254 }
1255 
1256 /**
1257  * is_kho_boot - check if current kernel was booted via KHO-enabled
1258  * kexec
1259  *
1260  * This function checks if the current kernel was loaded through a kexec
1261  * operation with KHO enabled, by verifying that a valid KHO FDT
1262  * was passed.
1263  *
1264  * Note: This function returns reliable results only after
1265  * kho_populate() has been called during early boot. Before that,
1266  * it may return false even if KHO data is present.
1267  *
1268  * Return: true if booted via KHO-enabled kexec, false otherwise
1269  */
1270 bool is_kho_boot(void)
1271 {
1272 	return !!kho_get_fdt();
1273 }
1274 EXPORT_SYMBOL_GPL(is_kho_boot);
1275 
1276 /**
1277  * kho_retrieve_subtree - retrieve a preserved sub FDT by its name.
1278  * @name: the name of the sub FDT passed to kho_add_subtree().
1279  * @phys: if found, the physical address of the sub FDT is stored in @phys.
1280  *
1281  * Retrieve a preserved sub FDT named @name and store its physical
1282  * address in @phys.
1283  *
1284  * Return: 0 on success, error code on failure
1285  */
1286 int kho_retrieve_subtree(const char *name, phys_addr_t *phys)
1287 {
1288 	const void *fdt = kho_get_fdt();
1289 	const u64 *val;
1290 	int offset, len;
1291 
1292 	if (!fdt)
1293 		return -ENOENT;
1294 
1295 	if (!phys)
1296 		return -EINVAL;
1297 
1298 	offset = fdt_subnode_offset(fdt, 0, name);
1299 	if (offset < 0)
1300 		return -ENOENT;
1301 
1302 	val = fdt_getprop(fdt, offset, KHO_FDT_SUB_TREE_PROP_NAME, &len);
1303 	if (!val || len != sizeof(*val))
1304 		return -EINVAL;
1305 
1306 	*phys = (phys_addr_t)*val;
1307 
1308 	return 0;
1309 }
1310 EXPORT_SYMBOL_GPL(kho_retrieve_subtree);
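
/*
 * Consumer-side sketch (hypothetical; it matches the producer example near
 * kho_add_subtree() above, and "example-subsys" and "state-pa" are invented
 * names): look up the sub FDT by name in the new kernel and read back the
 * physical address that the old kernel stored in it.
 *
 *	phys_addr_t fdt_phys;
 *	const void *fdt;
 *	const u64 *val;
 *	int len;
 *
 *	if (kho_retrieve_subtree("example-subsys", &fdt_phys))
 *		return -ENOENT;
 *	fdt = phys_to_virt(fdt_phys);
 *	val = fdt_getprop(fdt, 0, "state-pa", &len);
 *	if (!val || len != sizeof(*val))
 *		return -EINVAL;
 *	state_pa = get_unaligned(val);
 */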
1311 
1312 static int __init kho_mem_retrieve(const void *fdt)
1313 {
1314 	struct kho_radix_tree tree;
1315 	const phys_addr_t *mem;
1316 	int len;
1317 
1318 	/* Retrieve the KHO radix tree from passed-in FDT. */
1319 	mem = fdt_getprop(fdt, 0, KHO_FDT_MEMORY_MAP_PROP_NAME, &len);
1320 
1321 	if (!mem || len != sizeof(*mem)) {
1322 		pr_err("failed to get preserved KHO memory tree\n");
1323 		return -ENOENT;
1324 	}
1325 
1326 	if (!*mem)
1327 		return -EINVAL;
1328 
1329 	tree.root = phys_to_virt(*mem);
1330 	mutex_init(&tree.lock);
1331 	return kho_radix_walk_tree(&tree, kho_preserved_memory_reserve);
1332 }
1333 
1334 static __init int kho_out_fdt_setup(void)
1335 {
1336 	struct kho_radix_tree *tree = &kho_out.radix_tree;
1337 	void *root = kho_out.fdt;
1338 	u64 preserved_mem_tree_pa;
1339 	int err;
1340 
1341 	err = fdt_create(root, PAGE_SIZE);
1342 	err |= fdt_finish_reservemap(root);
1343 	err |= fdt_begin_node(root, "");
1344 	err |= fdt_property_string(root, "compatible", KHO_FDT_COMPATIBLE);
1345 
1346 	preserved_mem_tree_pa = virt_to_phys(tree->root);
1347 
1348 	err |= fdt_property(root, KHO_FDT_MEMORY_MAP_PROP_NAME,
1349 			    &preserved_mem_tree_pa,
1350 			    sizeof(preserved_mem_tree_pa));
1351 
1352 	err |= fdt_end_node(root);
1353 	err |= fdt_finish(root);
1354 
1355 	return err;
1356 }
1357 
1358 static __init int kho_init(void)
1359 {
1360 	struct kho_radix_tree *tree = &kho_out.radix_tree;
1361 	const void *fdt = kho_get_fdt();
1362 	int err = 0;
1363 
1364 	if (!kho_enable)
1365 		return 0;
1366 
1367 	tree->root = kzalloc(PAGE_SIZE, GFP_KERNEL);
1368 	if (!tree->root) {
1369 		err = -ENOMEM;
1370 		goto err_free_scratch;
1371 	}
1372 
1373 	kho_out.fdt = kho_alloc_preserve(PAGE_SIZE);
1374 	if (IS_ERR(kho_out.fdt)) {
1375 		err = PTR_ERR(kho_out.fdt);
1376 		goto err_free_kho_radix_tree_root;
1377 	}
1378 
1379 	err = kho_debugfs_init();
1380 	if (err)
1381 		goto err_free_fdt;
1382 
1383 	err = kho_out_debugfs_init(&kho_out.dbg);
1384 	if (err)
1385 		goto err_free_fdt;
1386 
1387 	err = kho_out_fdt_setup();
1388 	if (err)
1389 		goto err_free_fdt;
1390 
1391 	if (fdt) {
1392 		kho_in_debugfs_init(&kho_in.dbg, fdt);
1393 		return 0;
1394 	}
1395 
1396 	for (int i = 0; i < kho_scratch_cnt; i++) {
1397 		unsigned long base_pfn = PHYS_PFN(kho_scratch[i].addr);
1398 		unsigned long count = kho_scratch[i].size >> PAGE_SHIFT;
1399 		unsigned long pfn;
1400 
1401 		/*
1402 		 * When debug_pagealloc is enabled, __free_pages() clears the
1403 		 * corresponding PRESENT bit in the kernel page table.
1404 		 * Subsequent kmemleak scans of these pages would then fault
1405 		 * on the non-present mappings.
1406 		 * Mark scratch areas with kmemleak_ignore_phys() to exclude
1407 		 * them from kmemleak scanning.
1408 		 */
1409 		kmemleak_ignore_phys(kho_scratch[i].addr);
1410 		for (pfn = base_pfn; pfn < base_pfn + count;
1411 		     pfn += pageblock_nr_pages)
1412 			init_cma_reserved_pageblock(pfn_to_page(pfn));
1413 	}
1414 
1415 	WARN_ON_ONCE(kho_debugfs_fdt_add(&kho_out.dbg, "fdt",
1416 					 kho_out.fdt, true));
1417 
1418 	return 0;
1419 
1420 err_free_fdt:
1421 	kho_unpreserve_free(kho_out.fdt);
1422 err_free_kho_radix_tree_root:
1423 	kfree(tree->root);
1424 	tree->root = NULL;
1425 err_free_scratch:
1426 	kho_out.fdt = NULL;
1427 	for (int i = 0; i < kho_scratch_cnt; i++) {
1428 		void *start = __va(kho_scratch[i].addr);
1429 		void *end = start + kho_scratch[i].size;
1430 
1431 		free_reserved_area(start, end, -1, "");
1432 	}
1433 	kho_enable = false;
1434 	return err;
1435 }
1436 fs_initcall(kho_init);
1437 
1438 static void __init kho_release_scratch(void)
1439 {
1440 	phys_addr_t start, end;
1441 	u64 i;
1442 
1443 	memmap_init_kho_scratch_pages();
1444 
1445 	/*
1446 	 * Mark scratch mem as CMA before we return it. That way we
1447 	 * ensure that no kernel allocations happen on it. That means
1448 	 * we can reuse it as scratch memory again later.
1449 	 */
1450 	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
1451 			     MEMBLOCK_KHO_SCRATCH, &start, &end, NULL) {
1452 		ulong start_pfn = pageblock_start_pfn(PFN_DOWN(start));
1453 		ulong end_pfn = pageblock_align(PFN_UP(end));
1454 		ulong pfn;
1455 
1456 		for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages)
1457 			init_pageblock_migratetype(pfn_to_page(pfn),
1458 						   MIGRATE_CMA, false);
1459 	}
1460 }
1461 
1462 void __init kho_memory_init(void)
1463 {
1464 	if (kho_in.scratch_phys) {
1465 		kho_scratch = phys_to_virt(kho_in.scratch_phys);
1466 		kho_release_scratch();
1467 
1468 		if (kho_mem_retrieve(kho_get_fdt()))
1469 			kho_in.fdt_phys = 0;
1470 	} else {
1471 		kho_reserve_scratch();
1472 	}
1473 }
1474 
1475 void __init kho_populate(phys_addr_t fdt_phys, u64 fdt_len,
1476 			 phys_addr_t scratch_phys, u64 scratch_len)
1477 {
1478 	unsigned int scratch_cnt = scratch_len / sizeof(*kho_scratch);
1479 	struct kho_scratch *scratch = NULL;
1480 	phys_addr_t mem_map_phys;
1481 	void *fdt = NULL;
1482 	bool populated = false;
1483 	int err;
1484 
1485 	/* Validate the input FDT */
1486 	fdt = early_memremap(fdt_phys, fdt_len);
1487 	if (!fdt) {
1488 		pr_warn("setup: failed to memremap FDT (0x%llx)\n", fdt_phys);
1489 		goto report;
1490 	}
1491 	err = fdt_check_header(fdt);
1492 	if (err) {
1493 		pr_warn("setup: handover FDT (0x%llx) is invalid: %d\n",
1494 			fdt_phys, err);
1495 		goto unmap_fdt;
1496 	}
1497 	err = fdt_node_check_compatible(fdt, 0, KHO_FDT_COMPATIBLE);
1498 	if (err) {
1499 		pr_warn("setup: handover FDT (0x%llx) is incompatible with '%s': %d\n",
1500 			fdt_phys, KHO_FDT_COMPATIBLE, err);
1501 		goto unmap_fdt;
1502 	}
1503 
1504 	mem_map_phys = kho_get_mem_map_phys(fdt);
1505 	if (!mem_map_phys)
1506 		goto unmap_fdt;
1507 
1508 	scratch = early_memremap(scratch_phys, scratch_len);
1509 	if (!scratch) {
1510 		pr_warn("setup: failed to memremap scratch (phys=0x%llx, len=%llu)\n",
1511 			scratch_phys, scratch_len);
1512 		goto unmap_fdt;
1513 	}
1514 
1515 	/*
1516 	 * The previous kernel passed us safe, contiguous blocks of memory to
1517 	 * use for early boot purposes, so that we can resize the memblock
1518 	 * array as needed.
1519 	 */
1520 	for (int i = 0; i < scratch_cnt; i++) {
1521 		struct kho_scratch *area = &scratch[i];
1522 		u64 size = area->size;
1523 
1524 		memblock_add(area->addr, size);
1525 		err = memblock_mark_kho_scratch(area->addr, size);
1526 		if (err) {
1527 			pr_warn("failed to mark the scratch region 0x%pa+0x%pa: %pe\n",
1528 				&area->addr, &size, ERR_PTR(err));
1529 			goto unmap_scratch;
1530 		}
1531 		pr_debug("Marked 0x%pa+0x%pa as scratch\n", &area->addr, &size);
1532 	}
1533 
1534 	memblock_reserve(scratch_phys, scratch_len);
1535 
1536 	/*
1537 	 * Now that we have a viable region of scratch memory, tell the
1538 	 * memblock allocator to only use it for any allocations.
1539 	 * That way we ensure that nothing scribbles over in-use data while
1540 	 * we initialize the page tables, which we will need in order to
1541 	 * ingest all memory reservations from the previous kernel.
1542 	 */
1543 	memblock_set_kho_scratch_only();
1544 
1545 	kho_in.fdt_phys = fdt_phys;
1546 	kho_in.scratch_phys = scratch_phys;
1547 	kho_scratch_cnt = scratch_cnt;
1548 
1549 	populated = true;
1550 	pr_info("found kexec handover data.\n");
1551 
1552 unmap_scratch:
1553 	early_memunmap(scratch, scratch_len);
1554 unmap_fdt:
1555 	early_memunmap(fdt, fdt_len);
1556 report:
1557 	if (!populated)
1558 		pr_warn("disabling KHO revival\n");
1559 }
1560 
1561 /* Helper functions for kexec_file_load */
1562 
1563 int kho_fill_kimage(struct kimage *image)
1564 {
1565 	ssize_t scratch_size;
1566 	int err = 0;
1567 	struct kexec_buf scratch;
1568 
1569 	if (!kho_enable)
1570 		return 0;
1571 
1572 	image->kho.fdt = virt_to_phys(kho_out.fdt);
1573 
1574 	scratch_size = sizeof(*kho_scratch) * kho_scratch_cnt;
1575 	scratch = (struct kexec_buf){
1576 		.image = image,
1577 		.buffer = kho_scratch,
1578 		.bufsz = scratch_size,
1579 		.mem = KEXEC_BUF_MEM_UNKNOWN,
1580 		.memsz = scratch_size,
1581 		.buf_align = SZ_64K, /* Makes it easier to map */
1582 		.buf_max = ULONG_MAX,
1583 		.top_down = true,
1584 	};
1585 	err = kexec_add_buffer(&scratch);
1586 	if (err)
1587 		return err;
1588 	image->kho.scratch = &image->segment[image->nr_segments - 1];
1589 
1590 	return 0;
1591 }
1592 
1593 static int kho_walk_scratch(struct kexec_buf *kbuf,
1594 			    int (*func)(struct resource *, void *))
1595 {
1596 	int ret = 0;
1597 	int i;
1598 
1599 	for (i = 0; i < kho_scratch_cnt; i++) {
1600 		struct resource res = {
1601 			.start = kho_scratch[i].addr,
1602 			.end = kho_scratch[i].addr + kho_scratch[i].size - 1,
1603 		};
1604 
1605 		/* Try to fit the kimage into our KHO scratch region */
1606 		ret = func(&res, kbuf);
1607 		if (ret)
1608 			break;
1609 	}
1610 
1611 	return ret;
1612 }
1613 
1614 int kho_locate_mem_hole(struct kexec_buf *kbuf,
1615 			int (*func)(struct resource *, void *))
1616 {
1617 	int ret;
1618 
1619 	if (!kho_enable || kbuf->image->type == KEXEC_TYPE_CRASH)
1620 		return 1;
1621 
1622 	ret = kho_walk_scratch(kbuf, func);
1623 
1624 	return ret == 1 ? 0 : -EADDRNOTAVAIL;
1625 }
1626