// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec_handover.c - kexec handover metadata processing
 * Copyright (C) 2023 Alexander Graf <graf@amazon.com>
 * Copyright (C) 2025 Microsoft Corporation, Mike Rapoport <rppt@kernel.org>
 * Copyright (C) 2025 Google LLC, Changyuan Lyu <changyuanl@google.com>
 * Copyright (C) 2025 Pasha Tatashin <pasha.tatashin@soleen.com>
 */

#define pr_fmt(fmt) "KHO: " fmt

#include <linux/cleanup.h>
#include <linux/cma.h>
#include <linux/kmemleak.h>
#include <linux/count_zeros.h>
#include <linux/kexec.h>
#include <linux/kexec_handover.h>
#include <linux/kho/abi/kexec_handover.h>
#include <linux/libfdt.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/page-isolation.h>
#include <linux/unaligned.h>
#include <linux/vmalloc.h>

#include <asm/early_ioremap.h>

#include "kexec_handover_internal.h"
/*
 * KHO is tightly coupled with mm init and needs access to some of the
 * mm-internal APIs.
 */
#include "../../mm/internal.h"
#include "../kexec_internal.h"

/* The magic token for preserved pages */
#define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */

/*
 * KHO uses page->private, which is an unsigned long, to store page metadata.
 * Use it to store both the magic and the order.
 */
union kho_page_info {
	unsigned long page_private;
	struct {
		unsigned int order;
		unsigned int magic;
	};
};

static_assert(sizeof(union kho_page_info) == sizeof(((struct page *)0)->private));

static bool kho_enable __ro_after_init = IS_ENABLED(CONFIG_KEXEC_HANDOVER_ENABLE_DEFAULT);

bool kho_is_enabled(void)
{
	return kho_enable;
}
EXPORT_SYMBOL_GPL(kho_is_enabled);

static int __init kho_parse_enable(char *p)
{
	return kstrtobool(p, &kho_enable);
}
early_param("kho", kho_parse_enable);

/*
 * Keep track of memory that is to be preserved across KHO.
 *
 * The serializing side uses two levels of xarrays to manage chunks of per-order
 * PAGE_SIZE byte bitmaps. For instance, if PAGE_SIZE = 4096, the entire 1G
 * order of an 8TB system would fit inside a single 4096 byte bitmap. For
 * order 0 allocations each bitmap will cover 128M of address space. Thus, for
 * 16G of memory at most 512K of bitmap memory will be needed for order 0.
 *
 * This approach is fully incremental: as the serialization progresses, folios
 * can continue to be added to the tracker. The final step, immediately prior
 * to kexec, serializes the xarray information into a linked list for the
 * successor kernel to parse.
 */

#define PRESERVE_BITS (PAGE_SIZE * 8)

struct kho_mem_phys_bits {
	DECLARE_BITMAP(preserve, PRESERVE_BITS);
};

static_assert(sizeof(struct kho_mem_phys_bits) == PAGE_SIZE);

struct kho_mem_phys {
	/*
	 * Points to kho_mem_phys_bits, a sparse bitmap array. Each bit
	 * represents a page of size PAGE_SIZE << order.
	 */
	struct xarray phys_bits;
};

struct kho_mem_track {
	/* Points to kho_mem_phys, each order gets its own bitmap tree */
	struct xarray orders;
};
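
/*
 * Index math used by the tracker (an illustrative sketch, assuming
 * PAGE_SIZE == 4096; not upstream documentation): a preserved range is keyed
 * by its order-aligned pfn, e.g. pfn 0x12340 preserved at order 2 gives
 *
 *	pfn_high = 0x12340 >> 2 = 0x48d0
 *	bits     = xa_load(&physxa->phys_bits, 0x48d0 / PRESERVE_BITS)
 *	bit      = 0x48d0 % PRESERVE_BITS
 *
 * i.e. the order-2 entry of kho_mem_track::orders yields a kho_mem_phys,
 * whose phys_bits xarray is indexed by the upper bits of pfn_high, and the
 * remainder selects the bit inside the PAGE_SIZE bitmap.
 */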

struct khoser_mem_chunk;

struct kho_out {
	void *fdt;
	bool finalized;
	struct mutex lock; /* protects KHO FDT finalization */

	struct kho_mem_track track;
	struct kho_debugfs dbg;
};

static struct kho_out kho_out = {
	.lock = __MUTEX_INITIALIZER(kho_out.lock),
	.track = {
		.orders = XARRAY_INIT(kho_out.track.orders, 0),
	},
	.finalized = false,
};

static void *xa_load_or_alloc(struct xarray *xa, unsigned long index)
{
	void *res = xa_load(xa, index);

	if (res)
		return res;

	void *elm __free(free_page) = (void *)get_zeroed_page(GFP_KERNEL);

	if (!elm)
		return ERR_PTR(-ENOMEM);

	if (WARN_ON(kho_scratch_overlap(virt_to_phys(elm), PAGE_SIZE)))
		return ERR_PTR(-EINVAL);

	res = xa_cmpxchg(xa, index, NULL, elm, GFP_KERNEL);
	if (xa_is_err(res))
		return ERR_PTR(xa_err(res));
	else if (res)
		return res;

	return no_free_ptr(elm);
}

static void __kho_unpreserve_order(struct kho_mem_track *track, unsigned long pfn,
				   unsigned int order)
{
	struct kho_mem_phys_bits *bits;
	struct kho_mem_phys *physxa;
	const unsigned long pfn_high = pfn >> order;

	physxa = xa_load(&track->orders, order);
	if (WARN_ON_ONCE(!physxa))
		return;

	bits = xa_load(&physxa->phys_bits, pfn_high / PRESERVE_BITS);
	if (WARN_ON_ONCE(!bits))
		return;

	clear_bit(pfn_high % PRESERVE_BITS, bits->preserve);
}

static void __kho_unpreserve(struct kho_mem_track *track, unsigned long pfn,
			     unsigned long end_pfn)
{
	unsigned int order;

	while (pfn < end_pfn) {
		order = min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));

		__kho_unpreserve_order(track, pfn, order);

		pfn += 1 << order;
	}
}
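
/*
 * Illustration of the order decomposition above (not upstream documentation):
 * a [pfn, end_pfn) range is walked in the largest naturally aligned
 * power-of-two blocks that fit. For example, pfn = 3 and end_pfn = 8 yields
 *
 *	pfn 3: min(ctz(3) = 0, ilog2(5) = 2) -> order 0, advance to pfn 4
 *	pfn 4: min(ctz(4) = 2, ilog2(4) = 2) -> order 2, advance to pfn 8
 *
 * so the range is covered by one order-0 block and one order-2 block.
 */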

static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
				unsigned int order)
{
	struct kho_mem_phys_bits *bits;
	struct kho_mem_phys *physxa, *new_physxa;
	const unsigned long pfn_high = pfn >> order;

	might_sleep();
	physxa = xa_load(&track->orders, order);
	if (!physxa) {
		int err;

		new_physxa = kzalloc(sizeof(*physxa), GFP_KERNEL);
		if (!new_physxa)
			return -ENOMEM;

		xa_init(&new_physxa->phys_bits);
		physxa = xa_cmpxchg(&track->orders, order, NULL, new_physxa,
				    GFP_KERNEL);

		err = xa_err(physxa);
		if (err || physxa) {
			xa_destroy(&new_physxa->phys_bits);
			kfree(new_physxa);

			if (err)
				return err;
		} else {
			physxa = new_physxa;
		}
	}

	bits = xa_load_or_alloc(&physxa->phys_bits, pfn_high / PRESERVE_BITS);
	if (IS_ERR(bits))
		return PTR_ERR(bits);

	set_bit(pfn_high % PRESERVE_BITS, bits->preserve);

	return 0;
}

static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
{
	struct page *page = pfn_to_online_page(PHYS_PFN(phys));
	unsigned int nr_pages, ref_cnt;
	union kho_page_info info;

	if (!page)
		return NULL;

	info.page_private = page->private;
	/*
	 * deserialize_bitmap() only sets the magic on the head page. This magic
	 * check also implicitly makes sure phys is order-aligned since for
	 * non-order-aligned phys addresses, magic will never be set.
	 */
	if (WARN_ON_ONCE(info.magic != KHO_PAGE_MAGIC || info.order > MAX_PAGE_ORDER))
		return NULL;
	nr_pages = (1 << info.order);

	/* Clear private to make sure later restores on this page error out. */
	page->private = 0;
	/* Head page gets refcount of 1. */
	set_page_count(page, 1);

	/*
	 * For higher order folios, tail pages get a page count of zero.
	 * For physically contiguous order-0 pages, every page gets a page
	 * count of 1.
	 */
	ref_cnt = is_folio ? 0 : 1;
	for (unsigned int i = 1; i < nr_pages; i++)
		set_page_count(page + i, ref_cnt);

	if (is_folio && info.order)
		prep_compound_page(page, info.order);

	adjust_managed_page_count(page, nr_pages);
	return page;
}

/**
 * kho_restore_folio - recreates the folio from the preserved memory.
 * @phys: physical address of the folio.
 *
 * Return: pointer to the struct folio on success, NULL on failure.
 */
struct folio *kho_restore_folio(phys_addr_t phys)
{
	struct page *page = kho_restore_page(phys, true);

	return page ? page_folio(page) : NULL;
}
EXPORT_SYMBOL_GPL(kho_restore_folio);
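
/*
 * Illustrative use of the folio preservation API (a sketch, not taken from
 * upstream; "my_data" is a hypothetical client structure). The serializing
 * kernel preserves the folio and hands its physical address to the successor,
 * e.g. via a sub-FDT; the successor turns the address back into a folio:
 *
 *	// old kernel
 *	struct folio *folio = folio_alloc(GFP_KERNEL, 0);
 *	if (!folio || kho_preserve_folio(folio))
 *		return -ENOMEM;
 *	phys_addr_t phys = PFN_PHYS(folio_pfn(folio));
 *	// ... record phys in a preserved sub-FDT ...
 *
 *	// new kernel
 *	struct folio *folio = kho_restore_folio(phys);
 *	if (!folio)
 *		return -ENOENT;
 *	struct my_data *data = folio_address(folio);
 */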

/**
 * kho_restore_pages - restore a contiguous range of order 0 pages.
 * @phys: physical address of the first page.
 * @nr_pages: number of pages.
 *
 * Restore a contiguous range of order 0 pages that was preserved with
 * kho_preserve_pages().
 *
 * Return: pointer to the first struct page on success, NULL on failure.
 */
struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages)
{
	const unsigned long start_pfn = PHYS_PFN(phys);
	const unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn = start_pfn;

	while (pfn < end_pfn) {
		const unsigned int order =
			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
		struct page *page = kho_restore_page(PFN_PHYS(pfn), false);

		if (!page)
			return NULL;
		pfn += 1 << order;
	}

	return pfn_to_page(start_pfn);
}
EXPORT_SYMBOL_GPL(kho_restore_pages);

/*
 * Serialize and deserialize struct kho_mem_phys across kexec
 *
 * Record all the bitmaps in a linked list of pages for the next kernel to
 * process. Each chunk holds bitmaps of the same order and each block of bitmaps
 * starts at a given physical address. This allows the bitmaps to be sparse. The
 * xarray is used to store them in a tree while building up the data structure,
 * but the KHO successor kernel only needs to process them once in order.
 *
 * All of this memory is normal kmalloc() memory and is not marked for
 * preservation. The successor kernel will remain isolated to the scratch space
 * until it completes processing this list. Once processed all the memory
 * storing these ranges will be marked as free.
 */

struct khoser_mem_bitmap_ptr {
	phys_addr_t phys_start;
	DECLARE_KHOSER_PTR(bitmap, struct kho_mem_phys_bits *);
};

struct khoser_mem_chunk_hdr {
	DECLARE_KHOSER_PTR(next, struct khoser_mem_chunk *);
	unsigned int order;
	unsigned int num_elms;
};

#define KHOSER_BITMAP_SIZE                                   \
	((PAGE_SIZE - sizeof(struct khoser_mem_chunk_hdr)) / \
	 sizeof(struct khoser_mem_bitmap_ptr))

struct khoser_mem_chunk {
	struct khoser_mem_chunk_hdr hdr;
	struct khoser_mem_bitmap_ptr bitmaps[KHOSER_BITMAP_SIZE];
};

static_assert(sizeof(struct khoser_mem_chunk) == PAGE_SIZE);
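
/*
 * Capacity check (illustrative, assuming a 4096 byte PAGE_SIZE and 8 byte
 * KHOSER pointers): the header is 16 bytes and each bitmap descriptor is
 * 16 bytes, so
 *
 *	KHOSER_BITMAP_SIZE = (4096 - 16) / 16 = 255
 *
 * i.e. each chunk page carries up to 255 bitmap pointers plus the link to
 * the next chunk; the static_assert above keeps the chunk exactly one page.
 */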

static struct khoser_mem_chunk *new_chunk(struct khoser_mem_chunk *cur_chunk,
					  unsigned long order)
{
	struct khoser_mem_chunk *chunk __free(free_page) = NULL;

	chunk = (void *)get_zeroed_page(GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);

	if (WARN_ON(kho_scratch_overlap(virt_to_phys(chunk), PAGE_SIZE)))
		return ERR_PTR(-EINVAL);

	chunk->hdr.order = order;
	if (cur_chunk)
		KHOSER_STORE_PTR(cur_chunk->hdr.next, chunk);
	return no_free_ptr(chunk);
}

static void kho_mem_ser_free(struct khoser_mem_chunk *first_chunk)
{
	struct khoser_mem_chunk *chunk = first_chunk;

	while (chunk) {
		struct khoser_mem_chunk *tmp = chunk;

		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
		free_page((unsigned long)tmp);
	}
}

/*
 * Update the memory map property; if an old map is found, discard it via
 * kho_mem_ser_free().
 */
static void kho_update_memory_map(struct khoser_mem_chunk *first_chunk)
{
	void *ptr;
	u64 phys;

	ptr = fdt_getprop_w(kho_out.fdt, 0, KHO_FDT_MEMORY_MAP_PROP_NAME, NULL);

	/* Check and discard previous memory map */
	phys = get_unaligned((u64 *)ptr);
	if (phys)
		kho_mem_ser_free((struct khoser_mem_chunk *)phys_to_virt(phys));

	/* Update with the new value */
	phys = first_chunk ? (u64)virt_to_phys(first_chunk) : 0;
	put_unaligned(phys, (u64 *)ptr);
}

static int kho_mem_serialize(struct kho_out *kho_out)
{
	struct khoser_mem_chunk *first_chunk = NULL;
	struct khoser_mem_chunk *chunk = NULL;
	struct kho_mem_phys *physxa;
	unsigned long order;
	int err = -ENOMEM;

	xa_for_each(&kho_out->track.orders, order, physxa) {
		struct kho_mem_phys_bits *bits;
		unsigned long phys;

		chunk = new_chunk(chunk, order);
		if (IS_ERR(chunk)) {
			err = PTR_ERR(chunk);
			goto err_free;
		}

		if (!first_chunk)
			first_chunk = chunk;

		xa_for_each(&physxa->phys_bits, phys, bits) {
			struct khoser_mem_bitmap_ptr *elm;

			if (chunk->hdr.num_elms == ARRAY_SIZE(chunk->bitmaps)) {
				chunk = new_chunk(chunk, order);
				if (IS_ERR(chunk)) {
					err = PTR_ERR(chunk);
					goto err_free;
				}
			}

			elm = &chunk->bitmaps[chunk->hdr.num_elms];
			chunk->hdr.num_elms++;
			elm->phys_start = (phys * PRESERVE_BITS)
					  << (order + PAGE_SHIFT);
			KHOSER_STORE_PTR(elm->bitmap, bits);
		}
	}

	kho_update_memory_map(first_chunk);

	return 0;

err_free:
	kho_mem_ser_free(first_chunk);
	return err;
}

static void __init deserialize_bitmap(unsigned int order,
				      struct khoser_mem_bitmap_ptr *elm)
{
	struct kho_mem_phys_bits *bitmap = KHOSER_LOAD_PTR(elm->bitmap);
	unsigned long bit;

	for_each_set_bit(bit, bitmap->preserve, PRESERVE_BITS) {
		int sz = 1 << (order + PAGE_SHIFT);
		phys_addr_t phys =
			elm->phys_start + (bit << (order + PAGE_SHIFT));
		struct page *page = phys_to_page(phys);
		union kho_page_info info;

		memblock_reserve(phys, sz);
		memblock_reserved_mark_noinit(phys, sz);
		info.magic = KHO_PAGE_MAGIC;
		info.order = order;
		page->private = info.page_private;
	}
}

/* Returns physical address of the preserved memory map from FDT */
static phys_addr_t __init kho_get_mem_map_phys(const void *fdt)
{
	const void *mem_ptr;
	int len;

	mem_ptr = fdt_getprop(fdt, 0, KHO_FDT_MEMORY_MAP_PROP_NAME, &len);
	if (!mem_ptr || len != sizeof(u64)) {
		pr_err("failed to get preserved memory bitmaps\n");
		return 0;
	}

	return get_unaligned((const u64 *)mem_ptr);
}

static void __init kho_mem_deserialize(struct khoser_mem_chunk *chunk)
{
	while (chunk) {
		unsigned int i;

		for (i = 0; i != chunk->hdr.num_elms; i++)
			deserialize_bitmap(chunk->hdr.order,
					   &chunk->bitmaps[i]);
		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
	}
}

/*
 * With KHO enabled, memory can become fragmented because KHO regions may
 * be anywhere in physical address space. The scratch regions give us
 * safe zones that we will never see KHO allocations from. This is where we
 * can later safely load our new kexec images into and then use the scratch
 * area for early allocations that happen before the page allocator is
 * initialized.
 */
struct kho_scratch *kho_scratch;
unsigned int kho_scratch_cnt;

/*
 * The scratch areas are scaled by default as a percentage of the memory
 * allocated from memblock. A user can override the scale with the command
 * line parameter:
 *
 * kho_scratch=N%
 *
 * It is also possible to explicitly define sizes for the lowmem, global and
 * per-node scratch areas:
 *
 * kho_scratch=l[KMG],n[KMG],m[KMG]
 *
 * The explicit size definition takes precedence over the scale definition.
 */
static unsigned int scratch_scale __initdata = 200;
static phys_addr_t scratch_size_global __initdata;
static phys_addr_t scratch_size_pernode __initdata;
static phys_addr_t scratch_size_lowmem __initdata;

static int __init kho_parse_scratch_size(char *p)
{
	size_t len;
	unsigned long sizes[3];
	size_t total_size = 0;
	int i;

	if (!p)
		return -EINVAL;

	len = strlen(p);
	if (!len)
		return -EINVAL;

	/* parse nn% */
	if (p[len - 1] == '%') {
		/* unsigned int max is 4,294,967,295, 10 chars */
		char s_scale[11] = {};
		int ret = 0;

		if (len > ARRAY_SIZE(s_scale))
			return -EINVAL;

		memcpy(s_scale, p, len - 1);
		ret = kstrtouint(s_scale, 10, &scratch_scale);
		if (!ret)
			pr_notice("scratch scale is %d%%\n", scratch_scale);
		return ret;
	}

	/* parse ll[KMG],mm[KMG],nn[KMG] */
	for (i = 0; i < ARRAY_SIZE(sizes); i++) {
		char *endp = p;

		if (i > 0) {
			if (*p != ',')
				return -EINVAL;
			p += 1;
		}

		sizes[i] = memparse(p, &endp);
		if (endp == p)
			return -EINVAL;
		p = endp;
		total_size += sizes[i];
	}

	if (!total_size)
		return -EINVAL;

	/* The string should be fully consumed by now. */
	if (*p)
		return -EINVAL;

	scratch_size_lowmem = sizes[0];
	scratch_size_global = sizes[1];
	scratch_size_pernode = sizes[2];
	scratch_scale = 0;

	pr_notice("scratch areas: lowmem: %lluMiB global: %lluMiB pernode: %lluMiB\n",
		  (u64)(scratch_size_lowmem >> 20),
		  (u64)(scratch_size_global >> 20),
		  (u64)(scratch_size_pernode >> 20));

	return 0;
}
early_param("kho_scratch", kho_parse_scratch_size);
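
/*
 * Example command lines accepted by the parser above (illustrative):
 *
 *	kho_scratch=150%           scale each area to 150% of the memory
 *	                           memblock has reserved for the kernel
 *	kho_scratch=512M,1G,256M   512M lowmem, 1G global, 256M per NUMA node
 *
 * The second form sets scratch_scale to 0, so the explicit sizes win.
 */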

static void __init scratch_size_update(void)
{
	phys_addr_t size;

	if (!scratch_scale)
		return;

	size = memblock_reserved_kern_size(ARCH_LOW_ADDRESS_LIMIT,
					   NUMA_NO_NODE);
	size = size * scratch_scale / 100;
	scratch_size_lowmem = round_up(size, CMA_MIN_ALIGNMENT_BYTES);

	size = memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE,
					   NUMA_NO_NODE);
	size = size * scratch_scale / 100 - scratch_size_lowmem;
	scratch_size_global = round_up(size, CMA_MIN_ALIGNMENT_BYTES);
}

static phys_addr_t __init scratch_size_node(int nid)
{
	phys_addr_t size;

	if (scratch_scale) {
		size = memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE,
						   nid);
		size = size * scratch_scale / 100;
	} else {
		size = scratch_size_pernode;
	}

	return round_up(size, CMA_MIN_ALIGNMENT_BYTES);
}
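
/*
 * Numerical example for the default scale of 200 (illustrative): if memblock
 * has reserved 64M of kernel memory below ARCH_LOW_ADDRESS_LIMIT and 1G in
 * total, then
 *
 *	scratch_size_lowmem = 64M * 200 / 100        = 128M
 *	scratch_size_global = 1G  * 200 / 100 - 128M = 1920M
 *
 * both rounded up to CMA_MIN_ALIGNMENT_BYTES, and each node additionally
 * gets 200% of its own reserved memory via scratch_size_node().
 */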

/**
 * kho_reserve_scratch - Reserve memory for KHO scratch areas
 *
 * With KHO we can preserve arbitrary pages in the system. To ensure we still
 * have a large contiguous region of memory when we search the physical address
 * space for target memory, let's make sure we always have a large CMA region
 * active. This CMA region will only be used for movable pages, which are not a
 * problem for us during KHO because we can just move them somewhere else.
 */
static void __init kho_reserve_scratch(void)
{
	phys_addr_t addr, size;
	int nid, i = 0;

	if (!kho_enable)
		return;

	scratch_size_update();

	/* FIXME: deal with node hot-plug/remove */
	kho_scratch_cnt = num_online_nodes() + 2;
	size = kho_scratch_cnt * sizeof(*kho_scratch);
	kho_scratch = memblock_alloc(size, PAGE_SIZE);
	if (!kho_scratch)
		goto err_disable_kho;

	/*
	 * reserve scratch area in low memory for lowmem allocations in the
	 * next kernel
	 */
	size = scratch_size_lowmem;
	addr = memblock_phys_alloc_range(size, CMA_MIN_ALIGNMENT_BYTES, 0,
					 ARCH_LOW_ADDRESS_LIMIT);
	if (!addr)
		goto err_free_scratch_desc;

	kho_scratch[i].addr = addr;
	kho_scratch[i].size = size;
	i++;

	/* reserve large contiguous area for allocations without nid */
	size = scratch_size_global;
	addr = memblock_phys_alloc(size, CMA_MIN_ALIGNMENT_BYTES);
	if (!addr)
		goto err_free_scratch_areas;

	kho_scratch[i].addr = addr;
	kho_scratch[i].size = size;
	i++;

	for_each_online_node(nid) {
		size = scratch_size_node(nid);
		addr = memblock_alloc_range_nid(size, CMA_MIN_ALIGNMENT_BYTES,
						0, MEMBLOCK_ALLOC_ACCESSIBLE,
						nid, true);
		if (!addr)
			goto err_free_scratch_areas;

		kho_scratch[i].addr = addr;
		kho_scratch[i].size = size;
		i++;
	}

	return;

err_free_scratch_areas:
	for (i--; i >= 0; i--)
		memblock_phys_free(kho_scratch[i].addr, kho_scratch[i].size);
err_free_scratch_desc:
	memblock_free(kho_scratch, kho_scratch_cnt * sizeof(*kho_scratch));
err_disable_kho:
	pr_warn("Failed to reserve scratch area, disabling kexec handover\n");
	kho_enable = false;
}

/**
 * kho_add_subtree - record the physical address of a sub FDT in KHO root tree.
 * @name: name of the sub tree.
 * @fdt: the sub tree blob.
 *
 * Creates a new child node named @name in KHO root FDT and records
 * the physical address of @fdt. The pages of @fdt must also be preserved
 * by KHO for the new kernel to retrieve it after kexec.
 *
 * A debugfs blob entry is also created at
 * ``/sys/kernel/debug/kho/out/sub_fdts/@name`` when the kernel is configured
 * with CONFIG_KEXEC_HANDOVER_DEBUGFS.
 *
 * Return: 0 on success, error code on failure
 */
int kho_add_subtree(const char *name, void *fdt)
{
	phys_addr_t phys = virt_to_phys(fdt);
	void *root_fdt = kho_out.fdt;
	int err = -ENOMEM;
	int off, fdt_err;

	guard(mutex)(&kho_out.lock);

	fdt_err = fdt_open_into(root_fdt, root_fdt, PAGE_SIZE);
	if (fdt_err < 0)
		return err;

	off = fdt_add_subnode(root_fdt, 0, name);
	if (off < 0) {
		if (off == -FDT_ERR_EXISTS)
			err = -EEXIST;
		goto out_pack;
	}

	err = fdt_setprop(root_fdt, off, KHO_FDT_SUB_TREE_PROP_NAME,
			  &phys, sizeof(phys));
	if (err < 0)
		goto out_pack;

	WARN_ON_ONCE(kho_debugfs_fdt_add(&kho_out.dbg, name, fdt, false));

out_pack:
	fdt_pack(root_fdt);

	return err;
}
EXPORT_SYMBOL_GPL(kho_add_subtree);
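
/*
 * Typical client pattern (a sketch, not upstream code; "my-driver" and the
 * "data-phys" property name are hypothetical): build a small sub-FDT in
 * preserved memory, then publish it in the KHO root tree so the successor
 * kernel can find it by name via kho_retrieve_subtree():
 *
 *	void *sub = kho_alloc_preserve(PAGE_SIZE);
 *	if (IS_ERR(sub))
 *		return PTR_ERR(sub);
 *	err = fdt_create_empty_tree(sub, PAGE_SIZE);
 *	err |= fdt_setprop_u64(sub, 0, "data-phys", data_phys);
 *	if (err || kho_add_subtree("my-driver", sub)) {
 *		kho_unpreserve_free(sub);
 *		return -EINVAL;
 *	}
 */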

void kho_remove_subtree(void *fdt)
{
	phys_addr_t target_phys = virt_to_phys(fdt);
	void *root_fdt = kho_out.fdt;
	int off;
	int err;

	guard(mutex)(&kho_out.lock);

	err = fdt_open_into(root_fdt, root_fdt, PAGE_SIZE);
	if (err < 0)
		return;

	for (off = fdt_first_subnode(root_fdt, 0); off >= 0;
	     off = fdt_next_subnode(root_fdt, off)) {
		const u64 *val;
		int len;

		val = fdt_getprop(root_fdt, off, KHO_FDT_SUB_TREE_PROP_NAME, &len);
		if (!val || len != sizeof(phys_addr_t))
			continue;

		if ((phys_addr_t)*val == target_phys) {
			fdt_del_node(root_fdt, off);
			kho_debugfs_fdt_remove(&kho_out.dbg, fdt);
			break;
		}
	}

	fdt_pack(root_fdt);
}
EXPORT_SYMBOL_GPL(kho_remove_subtree);

/**
 * kho_preserve_folio - preserve a folio across kexec.
 * @folio: folio to preserve.
 *
 * Instructs KHO to preserve the whole folio across kexec. The order
 * will be preserved as well.
 *
 * Return: 0 on success, error code on failure
 */
int kho_preserve_folio(struct folio *folio)
{
	const unsigned long pfn = folio_pfn(folio);
	const unsigned int order = folio_order(folio);
	struct kho_mem_track *track = &kho_out.track;

	if (WARN_ON(kho_scratch_overlap(pfn << PAGE_SHIFT, PAGE_SIZE << order)))
		return -EINVAL;

	return __kho_preserve_order(track, pfn, order);
}
EXPORT_SYMBOL_GPL(kho_preserve_folio);

/**
 * kho_unpreserve_folio - unpreserve a folio.
 * @folio: folio to unpreserve.
 *
 * Instructs KHO to unpreserve a folio that was preserved by
 * kho_preserve_folio() before. The provided @folio (pfn and order)
 * must exactly match a previously preserved folio.
 */
void kho_unpreserve_folio(struct folio *folio)
{
	const unsigned long pfn = folio_pfn(folio);
	const unsigned int order = folio_order(folio);
	struct kho_mem_track *track = &kho_out.track;

	__kho_unpreserve_order(track, pfn, order);
}
EXPORT_SYMBOL_GPL(kho_unpreserve_folio);

/**
 * kho_preserve_pages - preserve contiguous pages across kexec
 * @page: first page in the range.
 * @nr_pages: number of pages.
 *
 * Preserve a contiguous range of order 0 pages. Must be restored using
 * kho_restore_pages() to ensure the pages are restored properly as order 0.
 *
 * Return: 0 on success, error code on failure
 */
int kho_preserve_pages(struct page *page, unsigned int nr_pages)
{
	struct kho_mem_track *track = &kho_out.track;
	const unsigned long start_pfn = page_to_pfn(page);
	const unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn = start_pfn;
	unsigned long failed_pfn = 0;
	int err = 0;

	if (WARN_ON(kho_scratch_overlap(start_pfn << PAGE_SHIFT,
					nr_pages << PAGE_SHIFT))) {
		return -EINVAL;
	}

	while (pfn < end_pfn) {
		const unsigned int order =
			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));

		err = __kho_preserve_order(track, pfn, order);
		if (err) {
			failed_pfn = pfn;
			break;
		}

		pfn += 1 << order;
	}

	if (err)
		__kho_unpreserve(track, start_pfn, failed_pfn);

	return err;
}
EXPORT_SYMBOL_GPL(kho_preserve_pages);
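
/*
 * Round-trip sketch for a multi-page buffer (illustrative; error handling
 * trimmed and "nr" chosen arbitrarily): the same @page/@nr_pages pair passed
 * to kho_preserve_pages() is what kho_restore_pages() expects back,
 * identified by the physical address of the first page:
 *
 *	// old kernel
 *	unsigned int nr = 16;
 *	struct page *p = alloc_pages(GFP_KERNEL, 4);	// 16 contiguous pages
 *	err = kho_preserve_pages(p, nr);
 *	phys_addr_t phys = page_to_phys(p);
 *
 *	// new kernel
 *	struct page *p = kho_restore_pages(phys, nr);
 */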

/**
 * kho_unpreserve_pages - unpreserve contiguous pages.
 * @page: first page in the range.
 * @nr_pages: number of pages.
 *
 * Instructs KHO to unpreserve @nr_pages contiguous pages starting from @page.
 * This must be called with the same @page and @nr_pages as the corresponding
 * kho_preserve_pages() call. Unpreserving arbitrary sub-ranges of larger
 * preserved blocks is not supported.
 */
void kho_unpreserve_pages(struct page *page, unsigned int nr_pages)
{
	struct kho_mem_track *track = &kho_out.track;
	const unsigned long start_pfn = page_to_pfn(page);
	const unsigned long end_pfn = start_pfn + nr_pages;

	__kho_unpreserve(track, start_pfn, end_pfn);
}
EXPORT_SYMBOL_GPL(kho_unpreserve_pages);

/* vmalloc flags KHO supports */
#define KHO_VMALLOC_SUPPORTED_FLAGS	(VM_ALLOC | VM_ALLOW_HUGE_VMAP)

/* KHO internal flags for vmalloc preservations */
#define KHO_VMALLOC_ALLOC	0x0001
#define KHO_VMALLOC_HUGE_VMAP	0x0002

static unsigned short vmalloc_flags_to_kho(unsigned int vm_flags)
{
	unsigned short kho_flags = 0;

	if (vm_flags & VM_ALLOC)
		kho_flags |= KHO_VMALLOC_ALLOC;
	if (vm_flags & VM_ALLOW_HUGE_VMAP)
		kho_flags |= KHO_VMALLOC_HUGE_VMAP;

	return kho_flags;
}

static unsigned int kho_flags_to_vmalloc(unsigned short kho_flags)
{
	unsigned int vm_flags = 0;

	if (kho_flags & KHO_VMALLOC_ALLOC)
		vm_flags |= VM_ALLOC;
	if (kho_flags & KHO_VMALLOC_HUGE_VMAP)
		vm_flags |= VM_ALLOW_HUGE_VMAP;

	return vm_flags;
}

static struct kho_vmalloc_chunk *new_vmalloc_chunk(struct kho_vmalloc_chunk *cur)
{
	struct kho_vmalloc_chunk *chunk;
	int err;

	chunk = (struct kho_vmalloc_chunk *)get_zeroed_page(GFP_KERNEL);
	if (!chunk)
		return NULL;

	err = kho_preserve_pages(virt_to_page(chunk), 1);
	if (err)
		goto err_free;
	if (cur)
		KHOSER_STORE_PTR(cur->hdr.next, chunk);
	return chunk;

err_free:
	free_page((unsigned long)chunk);
	return NULL;
}

static void kho_vmalloc_unpreserve_chunk(struct kho_vmalloc_chunk *chunk,
					 unsigned short order)
{
	struct kho_mem_track *track = &kho_out.track;
	unsigned long pfn = PHYS_PFN(virt_to_phys(chunk));

	__kho_unpreserve(track, pfn, pfn + 1);

	for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
		pfn = PHYS_PFN(chunk->phys[i]);
		__kho_unpreserve(track, pfn, pfn + (1 << order));
	}
}

/**
 * kho_preserve_vmalloc - preserve memory allocated with vmalloc() across kexec
 * @ptr: pointer to the area in vmalloc address space
 * @preservation: placeholder for preservation metadata
 *
 * Instructs KHO to preserve the area in vmalloc address space at @ptr. The
 * physical pages mapped at @ptr will be preserved and on successful return
 * @preservation will hold the physical address of a structure that describes
 * the preservation.
 *
 * NOTE: Memory allocated with the vmalloc_node() variants cannot be reliably
 * restored on the same node.
 *
 * Return: 0 on success, error code on failure
 */
int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation)
{
	struct kho_vmalloc_chunk *chunk;
	struct vm_struct *vm = find_vm_area(ptr);
	unsigned int order, flags, nr_contig_pages;
	unsigned int idx = 0;
	int err;

	if (!vm)
		return -EINVAL;

	if (vm->flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
		return -EOPNOTSUPP;

	flags = vmalloc_flags_to_kho(vm->flags);
	order = get_vm_area_page_order(vm);

	chunk = new_vmalloc_chunk(NULL);
	if (!chunk)
		return -ENOMEM;
	KHOSER_STORE_PTR(preservation->first, chunk);

	nr_contig_pages = (1 << order);
	for (int i = 0; i < vm->nr_pages; i += nr_contig_pages) {
		phys_addr_t phys = page_to_phys(vm->pages[i]);

		err = kho_preserve_pages(vm->pages[i], nr_contig_pages);
		if (err)
			goto err_free;

		chunk->phys[idx++] = phys;
		if (idx == ARRAY_SIZE(chunk->phys)) {
			chunk = new_vmalloc_chunk(chunk);
			if (!chunk)
				goto err_free;
			idx = 0;
		}
	}

	preservation->total_pages = vm->nr_pages;
	preservation->flags = flags;
	preservation->order = order;

	return 0;

err_free:
	kho_unpreserve_vmalloc(preservation);
	return err;
}
EXPORT_SYMBOL_GPL(kho_preserve_vmalloc);

/**
 * kho_unpreserve_vmalloc - unpreserve memory allocated with vmalloc()
 * @preservation: preservation metadata returned by kho_preserve_vmalloc()
 *
 * Instructs KHO to unpreserve the area in vmalloc address space that was
 * previously preserved with kho_preserve_vmalloc().
 */
void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation)
{
	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first);

	while (chunk) {
		struct kho_vmalloc_chunk *tmp = chunk;

		kho_vmalloc_unpreserve_chunk(chunk, preservation->order);

		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
		free_page((unsigned long)tmp);
	}
}
EXPORT_SYMBOL_GPL(kho_unpreserve_vmalloc);

/**
 * kho_restore_vmalloc - recreates and populates an area in vmalloc address
 * space from the preserved memory.
 * @preservation: preservation metadata.
 *
 * Recreates an area in vmalloc address space and populates it with memory that
 * was preserved using kho_preserve_vmalloc().
 *
 * Return: pointer to the area in the vmalloc address space, NULL on failure.
 */
void *kho_restore_vmalloc(const struct kho_vmalloc *preservation)
{
	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first);
	unsigned int align, order, shift, vm_flags;
	unsigned long total_pages, contig_pages;
	unsigned long addr, size;
	struct vm_struct *area;
	struct page **pages;
	unsigned int idx = 0;
	int err;

	vm_flags = kho_flags_to_vmalloc(preservation->flags);
	if (vm_flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
		return NULL;

	total_pages = preservation->total_pages;
	pages = kvmalloc_array(total_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;
	order = preservation->order;
	contig_pages = (1 << order);
	shift = PAGE_SHIFT + order;
	align = 1 << shift;

	while (chunk) {
		struct page *page;

		for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
			phys_addr_t phys = chunk->phys[i];

			if (idx + contig_pages > total_pages)
				goto err_free_pages_array;

			page = kho_restore_pages(phys, contig_pages);
			if (!page)
				goto err_free_pages_array;

			for (int j = 0; j < contig_pages; j++)
				pages[idx++] = page + j;
		}

		page = kho_restore_pages(virt_to_phys(chunk), 1);
		if (!page)
			goto err_free_pages_array;
		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
		__free_page(page);
	}

	if (idx != total_pages)
		goto err_free_pages_array;

	area = __get_vm_area_node(total_pages * PAGE_SIZE, align, shift,
				  vm_flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
	if (!area)
		goto err_free_pages_array;

	addr = (unsigned long)area->addr;
	size = get_vm_area_size(area);
	err = vmap_pages_range(addr, addr + size, PAGE_KERNEL, pages, shift);
	if (err)
		goto err_free_vm_area;

	area->nr_pages = total_pages;
	area->pages = pages;

	return area->addr;

err_free_vm_area:
	free_vm_area(area);
err_free_pages_array:
	kvfree(pages);
	return NULL;
}
EXPORT_SYMBOL_GPL(kho_restore_vmalloc);
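
/*
 * End-to-end sketch for the vmalloc path (illustrative; the struct
 * kho_vmalloc must itself live in preserved memory, e.g. in a buffer from
 * kho_alloc_preserve() whose physical address "pres_phys" is handed over in
 * a sub-FDT, so the successor can find it):
 *
 *	// old kernel
 *	void *buf = vmalloc(1 << 20);
 *	struct kho_vmalloc *pres = kho_alloc_preserve(sizeof(*pres));
 *	if (!IS_ERR(pres))
 *		err = kho_preserve_vmalloc(buf, pres);
 *
 *	// new kernel, pres_phys recovered from the sub-FDT
 *	struct kho_vmalloc *pres = phys_to_virt(pres_phys);
 *	void *buf = kho_restore_vmalloc(pres);
 */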

/**
 * kho_alloc_preserve - Allocate, zero, and preserve memory.
 * @size: The number of bytes to allocate.
 *
 * Allocates a physically contiguous block of zeroed pages that is large
 * enough to hold @size bytes. The allocated memory is then registered with
 * KHO for preservation across a kexec.
 *
 * Note: The actual allocated size will be rounded up to the nearest
 * power-of-two page boundary.
 *
 * Return: virtual address of the allocated and preserved memory on success,
 * or an ERR_PTR() encoded error on failure.
 */
void *kho_alloc_preserve(size_t size)
{
	struct folio *folio;
	int order, ret;

	if (!size)
		return ERR_PTR(-EINVAL);

	order = get_order(size);
	if (order > MAX_PAGE_ORDER)
		return ERR_PTR(-E2BIG);

	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, order);
	if (!folio)
		return ERR_PTR(-ENOMEM);

	ret = kho_preserve_folio(folio);
	if (ret) {
		folio_put(folio);
		return ERR_PTR(ret);
	}

	return folio_address(folio);
}
EXPORT_SYMBOL_GPL(kho_alloc_preserve);

/**
 * kho_unpreserve_free - Unpreserve and free memory.
 * @mem:  Pointer to the memory allocated by kho_alloc_preserve().
 *
 * Unregisters the memory from KHO preservation and frees the underlying
 * pages back to the system. This function should be called to clean up
 * memory allocated with kho_alloc_preserve().
 */
void kho_unpreserve_free(void *mem)
{
	struct folio *folio;

	if (!mem)
		return;

	folio = virt_to_folio(mem);
	kho_unpreserve_folio(folio);
	folio_put(folio);
}
EXPORT_SYMBOL_GPL(kho_unpreserve_free);

/**
 * kho_restore_free - Restore and free memory after kexec.
 * @mem:  Pointer to the memory (in the new kernel's address space)
 * that was allocated by the old kernel.
 *
 * This function is intended to be called in the new kernel (post-kexec)
 * to take ownership of and free a memory region that was preserved by the
 * old kernel using kho_alloc_preserve().
 *
 * It first restores the pages from KHO (using their physical address)
 * and then frees the pages back to the new kernel's page allocator.
 */
void kho_restore_free(void *mem)
{
	struct folio *folio;

	if (!mem)
		return;

	folio = kho_restore_folio(__pa(mem));
	if (!WARN_ON(!folio))
		folio_put(folio);
}
EXPORT_SYMBOL_GPL(kho_restore_free);
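
/*
 * Lifecycle of the three helpers above (illustrative): on the serializing
 * side a buffer either survives the kexec or is rolled back; on the restore
 * side it must be reclaimed exactly once:
 *
 *	void *buf = kho_alloc_preserve(SZ_4K);	// old kernel
 *	phys_addr_t phys = virt_to_phys(buf);	// handed over, e.g. in a sub-FDT
 *	...
 *	kho_unpreserve_free(buf);		// old kernel: abort the handover
 *	// or, in the new kernel after kexec:
 *	kho_restore_free(phys_to_virt(phys));	// reclaim the preserved pages
 */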

int kho_finalize(void)
{
	int ret;

	if (!kho_enable)
		return -EOPNOTSUPP;

	guard(mutex)(&kho_out.lock);
	ret = kho_mem_serialize(&kho_out);
	if (ret)
		return ret;

	kho_out.finalized = true;

	return 0;
}

bool kho_finalized(void)
{
	guard(mutex)(&kho_out.lock);
	return kho_out.finalized;
}

struct kho_in {
	phys_addr_t fdt_phys;
	phys_addr_t scratch_phys;
	phys_addr_t mem_map_phys;
	struct kho_debugfs dbg;
};

static struct kho_in kho_in;

static const void *kho_get_fdt(void)
{
	return kho_in.fdt_phys ? phys_to_virt(kho_in.fdt_phys) : NULL;
}

/**
 * is_kho_boot - check if current kernel was booted via KHO-enabled
 * kexec
 *
 * This function checks if the current kernel was loaded through a kexec
 * operation with KHO enabled, by verifying that a valid KHO FDT
 * was passed.
 *
 * Note: This function returns reliable results only after
 * kho_populate() has been called during early boot. Before that,
 * it may return false even if KHO data is present.
 *
 * Return: true if booted via KHO-enabled kexec, false otherwise
 */
bool is_kho_boot(void)
{
	return !!kho_get_fdt();
}
EXPORT_SYMBOL_GPL(is_kho_boot);

/**
 * kho_retrieve_subtree - retrieve a preserved sub FDT by its name.
 * @name: the name of the sub FDT passed to kho_add_subtree().
 * @phys: if found, the physical address of the sub FDT is stored in @phys.
 *
 * Retrieve a preserved sub FDT named @name and store its physical
 * address in @phys.
 *
 * Return: 0 on success, error code on failure
 */
int kho_retrieve_subtree(const char *name, phys_addr_t *phys)
{
	const void *fdt = kho_get_fdt();
	const u64 *val;
	int offset, len;

	if (!fdt)
		return -ENOENT;

	if (!phys)
		return -EINVAL;

	offset = fdt_subnode_offset(fdt, 0, name);
	if (offset < 0)
		return -ENOENT;

	val = fdt_getprop(fdt, offset, KHO_FDT_SUB_TREE_PROP_NAME, &len);
	if (!val || len != sizeof(*val))
		return -EINVAL;

	*phys = (phys_addr_t)*val;

	return 0;
}
EXPORT_SYMBOL_GPL(kho_retrieve_subtree);
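
/*
 * Restore-side counterpart to the kho_add_subtree() sketch earlier
 * (illustrative; "my-driver" and "data-phys" are the same hypothetical
 * names): look up the sub-FDT by name, then pull the handed-over values
 * out of it with plain libfdt accessors:
 *
 *	phys_addr_t sub_phys;
 *	if (!is_kho_boot() || kho_retrieve_subtree("my-driver", &sub_phys))
 *		return -ENOENT;
 *	void *sub = phys_to_virt(sub_phys);
 *	const u64 *val = fdt_getprop(sub, 0, "data-phys", NULL);
 */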

static __init int kho_out_fdt_setup(void)
{
	void *root = kho_out.fdt;
	u64 empty_mem_map = 0;
	int err;

	err = fdt_create(root, PAGE_SIZE);
	err |= fdt_finish_reservemap(root);
	err |= fdt_begin_node(root, "");
	err |= fdt_property_string(root, "compatible", KHO_FDT_COMPATIBLE);
	err |= fdt_property(root, KHO_FDT_MEMORY_MAP_PROP_NAME, &empty_mem_map,
			    sizeof(empty_mem_map));
	err |= fdt_end_node(root);
	err |= fdt_finish(root);

	return err;
}
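
/*
 * The freshly initialized root tree, rendered as DTS (illustrative; the
 * concrete property and compatible strings come from the KHO ABI header):
 *
 *	/ {
 *		compatible = KHO_FDT_COMPATIBLE;
 *		KHO_FDT_MEMORY_MAP_PROP_NAME = <0x0 0x0>; // u64, patched later
 *	};
 *
 * kho_mem_serialize() later rewrites the memory map property in place via
 * kho_update_memory_map(), and kho_add_subtree() appends child nodes.
 */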

static __init int kho_init(void)
{
	const void *fdt = kho_get_fdt();
	int err = 0;

	if (!kho_enable)
		return 0;

	kho_out.fdt = kho_alloc_preserve(PAGE_SIZE);
	if (IS_ERR(kho_out.fdt)) {
		err = PTR_ERR(kho_out.fdt);
		goto err_free_scratch;
	}

	err = kho_debugfs_init();
	if (err)
		goto err_free_fdt;

	err = kho_out_debugfs_init(&kho_out.dbg);
	if (err)
		goto err_free_fdt;

	err = kho_out_fdt_setup();
	if (err)
		goto err_free_fdt;

	if (fdt) {
		kho_in_debugfs_init(&kho_in.dbg, fdt);
		return 0;
	}

	for (int i = 0; i < kho_scratch_cnt; i++) {
		unsigned long base_pfn = PHYS_PFN(kho_scratch[i].addr);
		unsigned long count = kho_scratch[i].size >> PAGE_SHIFT;
		unsigned long pfn;

		/*
		 * When debug_pagealloc is enabled, __free_pages() clears the
		 * corresponding PRESENT bit in the kernel page table.
		 * Subsequent kmemleak scans of these pages then fault on the
		 * non-PRESENT entries.
		 * Mark scratch areas with kmemleak_ignore_phys() to exclude
		 * them from kmemleak scanning.
		 */
		kmemleak_ignore_phys(kho_scratch[i].addr);
		for (pfn = base_pfn; pfn < base_pfn + count;
		     pfn += pageblock_nr_pages)
			init_cma_reserved_pageblock(pfn_to_page(pfn));
	}

	WARN_ON_ONCE(kho_debugfs_fdt_add(&kho_out.dbg, "fdt",
					 kho_out.fdt, true));

	return 0;

err_free_fdt:
	kho_unpreserve_free(kho_out.fdt);
err_free_scratch:
	kho_out.fdt = NULL;
	for (int i = 0; i < kho_scratch_cnt; i++) {
		void *start = __va(kho_scratch[i].addr);
		void *end = start + kho_scratch[i].size;

		free_reserved_area(start, end, -1, "");
	}
	kho_enable = false;
	return err;
}
fs_initcall(kho_init);

static void __init kho_release_scratch(void)
{
	phys_addr_t start, end;
	u64 i;

	memmap_init_kho_scratch_pages();

	/*
	 * Mark scratch mem as CMA before we return it. That way we
	 * ensure that no kernel allocations happen on it. That means
	 * we can reuse it as scratch memory again later.
	 */
	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
			     MEMBLOCK_KHO_SCRATCH, &start, &end, NULL) {
		ulong start_pfn = pageblock_start_pfn(PFN_DOWN(start));
		ulong end_pfn = pageblock_align(PFN_UP(end));
		ulong pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages)
			init_pageblock_migratetype(pfn_to_page(pfn),
						   MIGRATE_CMA, false);
	}
}

void __init kho_memory_init(void)
{
	if (kho_in.mem_map_phys) {
		kho_scratch = phys_to_virt(kho_in.scratch_phys);
		kho_release_scratch();
		kho_mem_deserialize(phys_to_virt(kho_in.mem_map_phys));
	} else {
		kho_reserve_scratch();
	}
}

void __init kho_populate(phys_addr_t fdt_phys, u64 fdt_len,
			 phys_addr_t scratch_phys, u64 scratch_len)
{
	struct kho_scratch *scratch = NULL;
	phys_addr_t mem_map_phys;
	void *fdt = NULL;
	int err = 0;
	unsigned int scratch_cnt = scratch_len / sizeof(*kho_scratch);

	/* Validate the input FDT */
	fdt = early_memremap(fdt_phys, fdt_len);
	if (!fdt) {
		pr_warn("setup: failed to memremap FDT (0x%llx)\n", fdt_phys);
		err = -EFAULT;
		goto out;
	}
	err = fdt_check_header(fdt);
	if (err) {
		pr_warn("setup: handover FDT (0x%llx) is invalid: %d\n",
			fdt_phys, err);
		err = -EINVAL;
		goto out;
	}
	err = fdt_node_check_compatible(fdt, 0, KHO_FDT_COMPATIBLE);
	if (err) {
		pr_warn("setup: handover FDT (0x%llx) is incompatible with '%s': %d\n",
			fdt_phys, KHO_FDT_COMPATIBLE, err);
		err = -EINVAL;
		goto out;
	}

	mem_map_phys = kho_get_mem_map_phys(fdt);
	if (!mem_map_phys) {
		err = -ENOENT;
		goto out;
	}

	scratch = early_memremap(scratch_phys, scratch_len);
	if (!scratch) {
		pr_warn("setup: failed to memremap scratch (phys=0x%llx, len=%llu)\n",
			scratch_phys, scratch_len);
		err = -EFAULT;
		goto out;
	}

	/*
	 * We pass safe contiguous blocks of memory from the previous kernel
	 * to use for early boot purposes, so that we can resize the memblock
	 * array as needed.
	 */
	for (int i = 0; i < scratch_cnt; i++) {
		struct kho_scratch *area = &scratch[i];
		u64 size = area->size;

		memblock_add(area->addr, size);
		err = memblock_mark_kho_scratch(area->addr, size);
		if (WARN_ON(err)) {
			pr_warn("failed to mark the scratch region 0x%pa+0x%pa: %pe",
				&area->addr, &size, ERR_PTR(err));
			goto out;
		}
		pr_debug("Marked 0x%pa+0x%pa as scratch", &area->addr, &size);
	}

	memblock_reserve(scratch_phys, scratch_len);

	/*
	 * Now that we have a viable region of scratch memory, let's tell
	 * the memblock allocator to only use that for any allocations.
	 * That way we ensure that nothing scribbles over in-use data while
	 * we initialize the page tables, which we will need in order to
	 * ingest all memory reservations from the previous kernel.
	 */
	memblock_set_kho_scratch_only();

	kho_in.fdt_phys = fdt_phys;
	kho_in.scratch_phys = scratch_phys;
	kho_in.mem_map_phys = mem_map_phys;
	kho_scratch_cnt = scratch_cnt;
	pr_info("found kexec handover data.\n");

out:
	if (fdt)
		early_memunmap(fdt, fdt_len);
	if (scratch)
		early_memunmap(scratch, scratch_len);
	if (err)
		pr_warn("disabling KHO revival: %d\n", err);
}

/* Helper functions for kexec_file_load */

int kho_fill_kimage(struct kimage *image)
{
	ssize_t scratch_size;
	int err = 0;
	struct kexec_buf scratch;

	if (!kho_enable)
		return 0;

	image->kho.fdt = virt_to_phys(kho_out.fdt);

	scratch_size = sizeof(*kho_scratch) * kho_scratch_cnt;
	scratch = (struct kexec_buf){
		.image = image,
		.buffer = kho_scratch,
		.bufsz = scratch_size,
		.mem = KEXEC_BUF_MEM_UNKNOWN,
		.memsz = scratch_size,
		.buf_align = SZ_64K, /* Makes it easier to map */
		.buf_max = ULONG_MAX,
		.top_down = true,
	};
	err = kexec_add_buffer(&scratch);
	if (err)
		return err;
	image->kho.scratch = &image->segment[image->nr_segments - 1];

	return 0;
}

static int kho_walk_scratch(struct kexec_buf *kbuf,
			    int (*func)(struct resource *, void *))
{
	int ret = 0;
	int i;

	for (i = 0; i < kho_scratch_cnt; i++) {
		struct resource res = {
			.start = kho_scratch[i].addr,
			.end = kho_scratch[i].addr + kho_scratch[i].size - 1,
		};

		/* Try to fit the kimage into our KHO scratch region */
		ret = func(&res, kbuf);
		if (ret)
			break;
	}

	return ret;
}

int kho_locate_mem_hole(struct kexec_buf *kbuf,
			int (*func)(struct resource *, void *))
{
	int ret;

	if (!kho_enable || kbuf->image->type == KEXEC_TYPE_CRASH)
		return 1;

	ret = kho_walk_scratch(kbuf, func);

	return ret == 1 ? 0 : -EADDRNOTAVAIL;
}