// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec_handover.c - kexec handover metadata processing
 * Copyright (C) 2023 Alexander Graf <graf@amazon.com>
 * Copyright (C) 2025 Microsoft Corporation, Mike Rapoport <rppt@kernel.org>
 * Copyright (C) 2025 Google LLC, Changyuan Lyu <changyuanl@google.com>
 * Copyright (C) 2025 Pasha Tatashin <pasha.tatashin@soleen.com>
 */

#define pr_fmt(fmt) "KHO: " fmt

#include <linux/cleanup.h>
#include <linux/cma.h>
#include <linux/kmemleak.h>
#include <linux/count_zeros.h>
#include <linux/kexec.h>
#include <linux/kexec_handover.h>
#include <linux/kho/abi/kexec_handover.h>
#include <linux/libfdt.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/page-isolation.h>
#include <linux/unaligned.h>
#include <linux/vmalloc.h>

#include <asm/early_ioremap.h>
/*
 * KHO is tightly coupled with mm init and needs access to some of mm
 * internal APIs.
 */
#include "../../mm/internal.h"
#include "../kexec_internal.h"
#include "kexec_handover_internal.h"

/* The magic token for preserved pages */
#define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */

/*
 * KHO uses page->private, which is an unsigned long, to store page metadata.
 * Use it to store both the magic and the order.
 */
union kho_page_info {
	unsigned long page_private;
	struct {
		unsigned int order;
		unsigned int magic;
	};
};

static_assert(sizeof(union kho_page_info) == sizeof(((struct page *)0)->private));

static bool kho_enable __ro_after_init = IS_ENABLED(CONFIG_KEXEC_HANDOVER_ENABLE_DEFAULT);

bool kho_is_enabled(void)
{
	return kho_enable;
}
EXPORT_SYMBOL_GPL(kho_is_enabled);

static int __init kho_parse_enable(char *p)
{
	return kstrtobool(p, &kho_enable);
}
early_param("kho", kho_parse_enable);

/*
 * Keep track of memory that is to be preserved across KHO.
 *
 * The serializing side uses two levels of xarrays to manage chunks of per-order
 * PAGE_SIZE byte bitmaps. For instance, if PAGE_SIZE = 4096, the entire 1G
 * order of an 8TB system would fit inside a single 4096-byte bitmap. For
 * order 0 allocations each bitmap will cover 128M of address space. Thus, for
 * 16G of memory at most 512K of bitmap memory will be needed for order 0.
 *
 * This approach is fully incremental: as serialization progresses, folios can
 * continue to be added to the tracker. The final step, immediately prior to
 * kexec, serializes the xarray information into a linked list for the
 * successor kernel to parse.
 */
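
/*
 * Worked example of the sizing above (illustrative, assuming PAGE_SIZE ==
 * 4096, i.e. PRESERVE_BITS == 32768): for order 30 (1G) pages, an 8TB
 * system has 2^43 / 2^30 = 8192 such pages, which fit in a single
 * 32768-bit bitmap. For order 0, one bitmap covers 32768 * 4K = 128M of
 * address space, so 16G of memory needs at most 16G / 128M = 128 bitmaps,
 * or 512K of bitmap memory.
 */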

#define PRESERVE_BITS (PAGE_SIZE * 8)

struct kho_mem_phys_bits {
	DECLARE_BITMAP(preserve, PRESERVE_BITS);
};

static_assert(sizeof(struct kho_mem_phys_bits) == PAGE_SIZE);

struct kho_mem_phys {
	/*
	 * Points to kho_mem_phys_bits, a sparse bitmap array. Each bit
	 * represents one page of the order tracked by the parent xarray.
	 */
	struct xarray phys_bits;
};

struct kho_mem_track {
	/* Points to kho_mem_phys, each order gets its own bitmap tree */
	struct xarray orders;
};

struct khoser_mem_chunk;

struct kho_out {
	void *fdt;
	bool finalized;
	struct mutex lock; /* protects KHO FDT finalization */

	struct kho_mem_track track;
	struct kho_debugfs dbg;
};

static struct kho_out kho_out = {
	.lock = __MUTEX_INITIALIZER(kho_out.lock),
	.track = {
		.orders = XARRAY_INIT(kho_out.track.orders, 0),
	},
	.finalized = false,
};

/*
 * Load the entry at @index, lazily allocating a zeroed page for it if it
 * does not exist yet. Losing the xa_cmpxchg() race simply frees the new
 * page and returns the winner's entry.
 */
static void *xa_load_or_alloc(struct xarray *xa, unsigned long index)
{
	void *res = xa_load(xa, index);

	if (res)
		return res;

	void *elm __free(free_page) = (void *)get_zeroed_page(GFP_KERNEL);

	if (!elm)
		return ERR_PTR(-ENOMEM);

	if (WARN_ON(kho_scratch_overlap(virt_to_phys(elm), PAGE_SIZE)))
		return ERR_PTR(-EINVAL);

	res = xa_cmpxchg(xa, index, NULL, elm, GFP_KERNEL);
	if (xa_is_err(res))
		return ERR_PTR(xa_err(res));
	else if (res)
		return res;

	return no_free_ptr(elm);
}

static void __kho_unpreserve_order(struct kho_mem_track *track, unsigned long pfn,
				   unsigned int order)
{
	struct kho_mem_phys_bits *bits;
	struct kho_mem_phys *physxa;
	const unsigned long pfn_high = pfn >> order;

	physxa = xa_load(&track->orders, order);
	if (WARN_ON_ONCE(!physxa))
		return;

	bits = xa_load(&physxa->phys_bits, pfn_high / PRESERVE_BITS);
	if (WARN_ON_ONCE(!bits))
		return;

	clear_bit(pfn_high % PRESERVE_BITS, bits->preserve);
}

static void __kho_unpreserve(struct kho_mem_track *track, unsigned long pfn,
			     unsigned long end_pfn)
{
	unsigned int order;

	while (pfn < end_pfn) {
		order = min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));

		__kho_unpreserve_order(track, pfn, order);

		pfn += 1 << order;
	}
}
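
/*
 * The order computation above greedily carves a pfn range into the largest
 * naturally aligned power-of-two blocks that still fit. A worked example
 * (illustrative): for pfn = 3 and end_pfn = 16 the loop visits
 *
 *	pfn 3, order 0 (ctz(3) = 0)
 *	pfn 4, order 2 (ctz(4) = 2, ilog2(12) = 3)
 *	pfn 8, order 3 (ctz(8) = 3, ilog2(8) = 3)
 *
 * kho_preserve_pages() and kho_restore_pages() use the same decomposition,
 * so both sides of a handover split a range identically.
 */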

static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
				unsigned int order)
{
	struct kho_mem_phys_bits *bits;
	struct kho_mem_phys *physxa, *new_physxa;
	const unsigned long pfn_high = pfn >> order;

	might_sleep();
	physxa = xa_load(&track->orders, order);
	if (!physxa) {
		int err;

		new_physxa = kzalloc(sizeof(*physxa), GFP_KERNEL);
		if (!new_physxa)
			return -ENOMEM;

		xa_init(&new_physxa->phys_bits);
		physxa = xa_cmpxchg(&track->orders, order, NULL, new_physxa,
				    GFP_KERNEL);

		err = xa_err(physxa);
		if (err || physxa) {
			xa_destroy(&new_physxa->phys_bits);
			kfree(new_physxa);

			if (err)
				return err;
		} else {
			physxa = new_physxa;
		}
	}

	bits = xa_load_or_alloc(&physxa->phys_bits, pfn_high / PRESERVE_BITS);
	if (IS_ERR(bits))
		return PTR_ERR(bits);

	set_bit(pfn_high % PRESERVE_BITS, bits->preserve);

	return 0;
}

static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
{
	struct page *page = pfn_to_online_page(PHYS_PFN(phys));
	unsigned long nr_pages;
	unsigned int ref_cnt;
	union kho_page_info info;

	if (!page)
		return NULL;

	info.page_private = page->private;
	/*
	 * deserialize_bitmap() only sets the magic on the head page. This magic
	 * check also implicitly makes sure phys is order-aligned since for
	 * non-order-aligned phys addresses, magic will never be set.
	 */
	if (WARN_ON_ONCE(info.magic != KHO_PAGE_MAGIC || info.order > MAX_PAGE_ORDER))
		return NULL;
	nr_pages = (1 << info.order);

	/* Clear private to make sure later restores on this page error out. */
	page->private = 0;
	/* Head page gets refcount of 1. */
	set_page_count(page, 1);

	/*
	 * For higher order folios, tail pages get a page count of zero.
	 * For physically contiguous order-0 pages every page gets a page
	 * count of 1.
	 */
	ref_cnt = is_folio ? 0 : 1;
	for (unsigned long i = 1; i < nr_pages; i++)
		set_page_count(page + i, ref_cnt);

	if (is_folio && info.order)
		prep_compound_page(page, info.order);

	/* Always mark the head page's codetag as empty to avoid accounting mismatch */
	clear_page_tag_ref(page);
	if (!is_folio) {
		/* Also do that for the non-compound tail pages */
		for (unsigned int i = 1; i < nr_pages; i++)
			clear_page_tag_ref(page + i);
	}

	adjust_managed_page_count(page, nr_pages);
	return page;
}

/**
 * kho_restore_folio - recreates the folio from the preserved memory.
 * @phys: physical address of the folio.
 *
 * Return: pointer to the struct folio on success, NULL on failure.
 */
struct folio *kho_restore_folio(phys_addr_t phys)
{
	struct page *page = kho_restore_page(phys, true);

	return page ? page_folio(page) : NULL;
}
EXPORT_SYMBOL_GPL(kho_restore_folio);
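
/*
 * Illustrative sketch, not part of the original file: the intended pairing of
 * kho_preserve_folio() with kho_restore_folio(). The function name is
 * hypothetical, and in practice the physical address would be handed to the
 * successor kernel via a preserved sub-FDT rather than kept in a local.
 */
static int __maybe_unused kho_example_folio_roundtrip(void)
{
	/* Outgoing kernel: allocate and preserve an order-2 folio. */
	struct folio *folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 2);
	phys_addr_t phys;
	int err;

	if (!folio)
		return -ENOMEM;

	err = kho_preserve_folio(folio);
	if (err) {
		folio_put(folio);
		return err;
	}
	phys = PFN_PHYS(folio_pfn(folio));

	/* Incoming kernel: recreate the folio; its order is recovered too. */
	folio = kho_restore_folio(phys);
	if (!folio)
		return -EINVAL;
	folio_put(folio);

	return 0;
}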

/**
 * kho_restore_pages - restore a contiguous range of order 0 pages.
 * @phys: physical address of the first page.
 * @nr_pages: number of pages.
 *
 * Restore a contiguous range of order 0 pages that was preserved with
 * kho_preserve_pages().
 *
 * Return: pointer to the first struct page on success, NULL on failure.
 */
struct page *kho_restore_pages(phys_addr_t phys, unsigned long nr_pages)
{
	const unsigned long start_pfn = PHYS_PFN(phys);
	const unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn = start_pfn;

	while (pfn < end_pfn) {
		const unsigned int order =
			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
		struct page *page = kho_restore_page(PFN_PHYS(pfn), false);

		if (!page)
			return NULL;
		pfn += 1 << order;
	}

	return pfn_to_page(start_pfn);
}
EXPORT_SYMBOL_GPL(kho_restore_pages);

/*
 * Serialize and deserialize struct kho_mem_phys across kexec
 *
 * Record all the bitmaps in a linked list of pages for the next kernel to
 * process. Each chunk holds bitmaps of the same order and each block of bitmaps
 * starts at a given physical address. This allows the bitmaps to be sparse. The
 * xarray is used to store them in a tree while building up the data structure,
 * but the KHO successor kernel only needs to process them once in order.
 *
 * All of this memory is normal kmalloc() memory and is not marked for
 * preservation. The successor kernel will remain isolated to the scratch space
 * until it completes processing this list. Once processed, all the memory
 * storing these ranges will be marked as free.
 */

struct khoser_mem_bitmap_ptr {
	phys_addr_t phys_start;
	DECLARE_KHOSER_PTR(bitmap, struct kho_mem_phys_bits *);
};

struct khoser_mem_chunk_hdr {
	DECLARE_KHOSER_PTR(next, struct khoser_mem_chunk *);
	unsigned int order;
	unsigned int num_elms;
};

#define KHOSER_BITMAP_SIZE                                   \
	((PAGE_SIZE - sizeof(struct khoser_mem_chunk_hdr)) / \
	 sizeof(struct khoser_mem_bitmap_ptr))

struct khoser_mem_chunk {
	struct khoser_mem_chunk_hdr hdr;
	struct khoser_mem_bitmap_ptr bitmaps[KHOSER_BITMAP_SIZE];
};

static_assert(sizeof(struct khoser_mem_chunk) == PAGE_SIZE);
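
/*
 * Illustrative arithmetic (assuming 4K pages and 8-byte KHOSER pointers, so
 * the header and each bitmap pointer entry are 16 bytes each):
 * KHOSER_BITMAP_SIZE = (4096 - 16) / 16 = 255 entries per chunk, and
 * sizeof(struct khoser_mem_chunk) = 16 + 255 * 16 = 4096, which is what the
 * static_assert above checks.
 */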

static struct khoser_mem_chunk *new_chunk(struct khoser_mem_chunk *cur_chunk,
					  unsigned long order)
{
	struct khoser_mem_chunk *chunk __free(free_page) = NULL;

	chunk = (void *)get_zeroed_page(GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);

	if (WARN_ON(kho_scratch_overlap(virt_to_phys(chunk), PAGE_SIZE)))
		return ERR_PTR(-EINVAL);

	chunk->hdr.order = order;
	if (cur_chunk)
		KHOSER_STORE_PTR(cur_chunk->hdr.next, chunk);
	return no_free_ptr(chunk);
}

static void kho_mem_ser_free(struct khoser_mem_chunk *first_chunk)
{
	struct khoser_mem_chunk *chunk = first_chunk;

	while (chunk) {
		struct khoser_mem_chunk *tmp = chunk;

		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
		free_page((unsigned long)tmp);
	}
}

/*
 * Update the memory map property; if an old one is found, discard it via
 * kho_mem_ser_free().
 */
static void kho_update_memory_map(struct khoser_mem_chunk *first_chunk)
{
	void *ptr;
	u64 phys;

	ptr = fdt_getprop_w(kho_out.fdt, 0, KHO_FDT_MEMORY_MAP_PROP_NAME, NULL);

	/* Check and discard previous memory map */
	phys = get_unaligned((u64 *)ptr);
	if (phys)
		kho_mem_ser_free((struct khoser_mem_chunk *)phys_to_virt(phys));

	/* Update with the new value */
	phys = first_chunk ? (u64)virt_to_phys(first_chunk) : 0;
	put_unaligned(phys, (u64 *)ptr);
}

static int kho_mem_serialize(struct kho_out *kho_out)
{
	struct khoser_mem_chunk *first_chunk = NULL;
	struct khoser_mem_chunk *chunk = NULL;
	struct kho_mem_phys *physxa;
	unsigned long order;
	int err = -ENOMEM;

	xa_for_each(&kho_out->track.orders, order, physxa) {
		struct kho_mem_phys_bits *bits;
		unsigned long phys;

		chunk = new_chunk(chunk, order);
		if (IS_ERR(chunk)) {
			err = PTR_ERR(chunk);
			goto err_free;
		}

		if (!first_chunk)
			first_chunk = chunk;

		xa_for_each(&physxa->phys_bits, phys, bits) {
			struct khoser_mem_bitmap_ptr *elm;

			if (chunk->hdr.num_elms == ARRAY_SIZE(chunk->bitmaps)) {
				chunk = new_chunk(chunk, order);
				if (IS_ERR(chunk)) {
					err = PTR_ERR(chunk);
					goto err_free;
				}
			}

			elm = &chunk->bitmaps[chunk->hdr.num_elms];
			chunk->hdr.num_elms++;
			elm->phys_start = (phys * PRESERVE_BITS)
					  << (order + PAGE_SHIFT);
			KHOSER_STORE_PTR(elm->bitmap, bits);
		}
	}

	kho_update_memory_map(first_chunk);

	return 0;

err_free:
	kho_mem_ser_free(first_chunk);
	return err;
}

static void __init deserialize_bitmap(unsigned int order,
				      struct khoser_mem_bitmap_ptr *elm)
{
	struct kho_mem_phys_bits *bitmap = KHOSER_LOAD_PTR(elm->bitmap);
	unsigned long bit;

	for_each_set_bit(bit, bitmap->preserve, PRESERVE_BITS) {
		int sz = 1 << (order + PAGE_SHIFT);
		phys_addr_t phys =
			elm->phys_start + (bit << (order + PAGE_SHIFT));
		struct page *page = phys_to_page(phys);
		union kho_page_info info;

		memblock_reserve(phys, sz);
		memblock_reserved_mark_noinit(phys, sz);
		info.magic = KHO_PAGE_MAGIC;
		info.order = order;
		page->private = info.page_private;
	}
}

/* Returns physical address of the preserved memory map from FDT */
static phys_addr_t __init kho_get_mem_map_phys(const void *fdt)
{
	const void *mem_ptr;
	int len;

	mem_ptr = fdt_getprop(fdt, 0, KHO_FDT_MEMORY_MAP_PROP_NAME, &len);
	if (!mem_ptr || len != sizeof(u64)) {
		pr_err("failed to get preserved memory bitmaps\n");
		return 0;
	}

	return get_unaligned((const u64 *)mem_ptr);
}

static void __init kho_mem_deserialize(struct khoser_mem_chunk *chunk)
{
	while (chunk) {
		unsigned int i;

		for (i = 0; i != chunk->hdr.num_elms; i++)
			deserialize_bitmap(chunk->hdr.order,
					   &chunk->bitmaps[i]);
		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
	}
}

/*
 * With KHO enabled, memory can become fragmented because KHO regions may
 * be anywhere in physical address space. The scratch regions give us safe
 * zones that we will never see KHO allocations from. This is where we
 * can later safely load our new kexec images into and then use the scratch
 * area for early allocations that happen before the page allocator is
 * initialized.
 */
struct kho_scratch *kho_scratch;
unsigned int kho_scratch_cnt;

/*
 * The scratch areas are scaled by default as a percentage of the memory
 * allocated from memblock. A user can override the scale with the command
 * line parameter:
 *
 * kho_scratch=N%
 *
 * It is also possible to explicitly define sizes for the lowmem, global and
 * per-node scratch areas:
 *
 * kho_scratch=l[KMG],m[KMG],n[KMG]
 *
 * The explicit size definition takes precedence over the scale definition.
 */
static unsigned int scratch_scale __initdata = 200;
static phys_addr_t scratch_size_global __initdata;
static phys_addr_t scratch_size_pernode __initdata;
static phys_addr_t scratch_size_lowmem __initdata;

static int __init kho_parse_scratch_size(char *p)
{
	size_t len;
	unsigned long sizes[3];
	size_t total_size = 0;
	int i;

	if (!p)
		return -EINVAL;

	len = strlen(p);
	if (!len)
		return -EINVAL;

	/* parse nn% */
	if (p[len - 1] == '%') {
		/* unsigned int max is 4,294,967,295, 10 chars */
		char s_scale[11] = {};
		int ret = 0;

		if (len > ARRAY_SIZE(s_scale))
			return -EINVAL;

		memcpy(s_scale, p, len - 1);
		ret = kstrtouint(s_scale, 10, &scratch_scale);
		if (!ret)
			pr_notice("scratch scale is %d%%\n", scratch_scale);
		return ret;
	}

	/* parse ll[KMG],mm[KMG],nn[KMG] */
	for (i = 0; i < ARRAY_SIZE(sizes); i++) {
		char *endp = p;

		if (i > 0) {
			if (*p != ',')
				return -EINVAL;
			p += 1;
		}

		sizes[i] = memparse(p, &endp);
		if (endp == p)
			return -EINVAL;
		p = endp;
		total_size += sizes[i];
	}

	if (!total_size)
		return -EINVAL;

	/* The string should be fully consumed by now. */
	if (*p)
		return -EINVAL;

	scratch_size_lowmem = sizes[0];
	scratch_size_global = sizes[1];
	scratch_size_pernode = sizes[2];
	scratch_scale = 0;

	pr_notice("scratch areas: lowmem: %lluMiB global: %lluMiB pernode: %lluMiB\n",
		  (u64)(scratch_size_lowmem >> 20),
		  (u64)(scratch_size_global >> 20),
		  (u64)(scratch_size_pernode >> 20));

	return 0;
}
early_param("kho_scratch", kho_parse_scratch_size);
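
/*
 * Example command lines (illustrative values): "kho_scratch=25%" scales all
 * scratch areas to a quarter of the memory reserved from memblock, while
 * "kho_scratch=128M,1G,256M" requests a 128M lowmem area, a 1G global area,
 * and 256M per NUMA node.
 */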

static void __init scratch_size_update(void)
{
	phys_addr_t size;

	if (!scratch_scale)
		return;

	size = memblock_reserved_kern_size(ARCH_LOW_ADDRESS_LIMIT,
					   NUMA_NO_NODE);
	size = size * scratch_scale / 100;
	scratch_size_lowmem = round_up(size, CMA_MIN_ALIGNMENT_BYTES);

	size = memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE,
					   NUMA_NO_NODE);
	size = size * scratch_scale / 100 - scratch_size_lowmem;
	scratch_size_global = round_up(size, CMA_MIN_ALIGNMENT_BYTES);
}

static phys_addr_t __init scratch_size_node(int nid)
{
	phys_addr_t size;

	if (scratch_scale) {
		size = memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE,
						   nid);
		size = size * scratch_scale / 100;
	} else {
		size = scratch_size_pernode;
	}

	return round_up(size, CMA_MIN_ALIGNMENT_BYTES);
}

/**
 * kho_reserve_scratch - Reserve a contiguous chunk of memory for kexec
 *
 * With KHO we can preserve arbitrary pages in the system. To ensure we still
 * have a large contiguous region of memory when we search the physical address
 * space for target memory, let's make sure we always have a large CMA region
 * active. This CMA region will only be used for movable pages, which are not a
 * problem for us during KHO because we can just move them somewhere else.
 */
static void __init kho_reserve_scratch(void)
{
	phys_addr_t addr, size;
	int nid, i = 0;

	if (!kho_enable)
		return;

	scratch_size_update();

	/* FIXME: deal with node hot-plug/remove */
	kho_scratch_cnt = num_online_nodes() + 2;
	size = kho_scratch_cnt * sizeof(*kho_scratch);
	kho_scratch = memblock_alloc(size, PAGE_SIZE);
	if (!kho_scratch) {
		pr_err("Failed to reserve scratch array\n");
		goto err_disable_kho;
	}

	/*
	 * reserve scratch area in low memory for lowmem allocations in the
	 * next kernel
	 */
	size = scratch_size_lowmem;
	addr = memblock_phys_alloc_range(size, CMA_MIN_ALIGNMENT_BYTES, 0,
					 ARCH_LOW_ADDRESS_LIMIT);
	if (!addr) {
		pr_err("Failed to reserve lowmem scratch buffer\n");
		goto err_free_scratch_desc;
	}

	kho_scratch[i].addr = addr;
	kho_scratch[i].size = size;
	i++;

	/* reserve large contiguous area for allocations without nid */
	size = scratch_size_global;
	addr = memblock_phys_alloc(size, CMA_MIN_ALIGNMENT_BYTES);
	if (!addr) {
		pr_err("Failed to reserve global scratch buffer\n");
		goto err_free_scratch_areas;
	}

	kho_scratch[i].addr = addr;
	kho_scratch[i].size = size;
	i++;

	for_each_online_node(nid) {
		size = scratch_size_node(nid);
		addr = memblock_alloc_range_nid(size, CMA_MIN_ALIGNMENT_BYTES,
						0, MEMBLOCK_ALLOC_ACCESSIBLE,
						nid, true);
		if (!addr) {
			pr_err("Failed to reserve nid %d scratch buffer\n", nid);
			goto err_free_scratch_areas;
		}

		kho_scratch[i].addr = addr;
		kho_scratch[i].size = size;
		i++;
	}

	return;

err_free_scratch_areas:
	for (i--; i >= 0; i--)
		memblock_phys_free(kho_scratch[i].addr, kho_scratch[i].size);
err_free_scratch_desc:
	memblock_free(kho_scratch, kho_scratch_cnt * sizeof(*kho_scratch));
err_disable_kho:
	pr_warn("Failed to reserve scratch area, disabling kexec handover\n");
	kho_enable = false;
}

/**
 * kho_add_subtree - record the physical address of a sub FDT in the KHO root tree.
 * @name: name of the sub tree.
 * @fdt: the sub tree blob.
 *
 * Creates a new child node named @name in the KHO root FDT and records
 * the physical address of @fdt. The pages of @fdt must also be preserved
 * by KHO for the new kernel to retrieve it after kexec.
 *
 * A debugfs blob entry is also created at
 * ``/sys/kernel/debug/kho/out/sub_fdts/@name`` when the kernel is configured
 * with CONFIG_KEXEC_HANDOVER_DEBUGFS.
 *
 * Return: 0 on success, error code on failure
 */
int kho_add_subtree(const char *name, void *fdt)
{
	phys_addr_t phys = virt_to_phys(fdt);
	void *root_fdt = kho_out.fdt;
	int err = -ENOMEM;
	int off, fdt_err;

	guard(mutex)(&kho_out.lock);

	fdt_err = fdt_open_into(root_fdt, root_fdt, PAGE_SIZE);
	if (fdt_err < 0)
		return err;

	off = fdt_add_subnode(root_fdt, 0, name);
	if (off < 0) {
		if (off == -FDT_ERR_EXISTS)
			err = -EEXIST;
		goto out_pack;
	}

	err = fdt_setprop(root_fdt, off, KHO_FDT_SUB_TREE_PROP_NAME,
			  &phys, sizeof(phys));
	if (err < 0)
		goto out_pack;

	WARN_ON_ONCE(kho_debugfs_fdt_add(&kho_out.dbg, name, fdt, false));

out_pack:
	fdt_pack(root_fdt);

	return err;
}
EXPORT_SYMBOL_GPL(kho_add_subtree);
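
/*
 * Illustrative sketch, not part of the original file: how a subsystem might
 * publish state for its successor. The node name "example", the compatible
 * string "example-v1" and the u64 payload are hypothetical; the KHO and
 * libfdt calls are real. Error unwinding is elided for brevity.
 */
static int __maybe_unused kho_example_publish(u64 state)
{
	void *fdt = kho_alloc_preserve(PAGE_SIZE);
	int err;

	if (IS_ERR(fdt))
		return PTR_ERR(fdt);

	/* Build a minimal sub-FDT carrying one property. */
	err = fdt_create(fdt, PAGE_SIZE);
	err |= fdt_finish_reservemap(fdt);
	err |= fdt_begin_node(fdt, "");
	err |= fdt_property_string(fdt, "compatible", "example-v1");
	err |= fdt_property(fdt, "state", &state, sizeof(state));
	err |= fdt_end_node(fdt);
	err |= fdt_finish(fdt);
	if (err)
		return -EINVAL;

	/* Hook the preserved blob into the KHO root tree under "example". */
	return kho_add_subtree("example", fdt);
}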

void kho_remove_subtree(void *fdt)
{
	phys_addr_t target_phys = virt_to_phys(fdt);
	void *root_fdt = kho_out.fdt;
	int off;
	int err;

	guard(mutex)(&kho_out.lock);

	err = fdt_open_into(root_fdt, root_fdt, PAGE_SIZE);
	if (err < 0)
		return;

	for (off = fdt_first_subnode(root_fdt, 0); off >= 0;
	     off = fdt_next_subnode(root_fdt, off)) {
		const u64 *val;
		int len;

		val = fdt_getprop(root_fdt, off, KHO_FDT_SUB_TREE_PROP_NAME, &len);
		if (!val || len != sizeof(phys_addr_t))
			continue;

		if ((phys_addr_t)*val == target_phys) {
			fdt_del_node(root_fdt, off);
			kho_debugfs_fdt_remove(&kho_out.dbg, fdt);
			break;
		}
	}

	fdt_pack(root_fdt);
}
EXPORT_SYMBOL_GPL(kho_remove_subtree);

/**
 * kho_preserve_folio - preserve a folio across kexec.
 * @folio: folio to preserve.
 *
 * Instructs KHO to preserve the whole folio across kexec. The order
 * will be preserved as well.
 *
 * Return: 0 on success, error code on failure
 */
int kho_preserve_folio(struct folio *folio)
{
	const unsigned long pfn = folio_pfn(folio);
	const unsigned int order = folio_order(folio);
	struct kho_mem_track *track = &kho_out.track;

	if (WARN_ON(kho_scratch_overlap(pfn << PAGE_SHIFT, PAGE_SIZE << order)))
		return -EINVAL;

	return __kho_preserve_order(track, pfn, order);
}
EXPORT_SYMBOL_GPL(kho_preserve_folio);

/**
 * kho_unpreserve_folio - unpreserve a folio.
 * @folio: folio to unpreserve.
 *
 * Instructs KHO to unpreserve a folio that was preserved by
 * kho_preserve_folio() before. The provided @folio (pfn and order)
 * must exactly match a previously preserved folio.
 */
void kho_unpreserve_folio(struct folio *folio)
{
	const unsigned long pfn = folio_pfn(folio);
	const unsigned int order = folio_order(folio);
	struct kho_mem_track *track = &kho_out.track;

	__kho_unpreserve_order(track, pfn, order);
}
EXPORT_SYMBOL_GPL(kho_unpreserve_folio);

/**
 * kho_preserve_pages - preserve contiguous pages across kexec
 * @page: first page in the range.
 * @nr_pages: number of pages.
 *
 * Preserve a contiguous range of order 0 pages. Must be restored using
 * kho_restore_pages() to ensure the pages are restored properly as order 0.
 *
 * Return: 0 on success, error code on failure
 */
int kho_preserve_pages(struct page *page, unsigned long nr_pages)
{
	struct kho_mem_track *track = &kho_out.track;
	const unsigned long start_pfn = page_to_pfn(page);
	const unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn = start_pfn;
	unsigned long failed_pfn = 0;
	int err = 0;

	if (WARN_ON(kho_scratch_overlap(start_pfn << PAGE_SHIFT,
					nr_pages << PAGE_SHIFT))) {
		return -EINVAL;
	}

	while (pfn < end_pfn) {
		const unsigned int order =
			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));

		err = __kho_preserve_order(track, pfn, order);
		if (err) {
			failed_pfn = pfn;
			break;
		}

		pfn += 1 << order;
	}

	if (err)
		__kho_unpreserve(track, start_pfn, failed_pfn);

	return err;
}
EXPORT_SYMBOL_GPL(kho_preserve_pages);
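
/*
 * Illustrative sketch, not part of the original file: preserving a
 * driver-owned buffer of order-0 pages. The buffer size is hypothetical;
 * the restore side must pass the same nr_pages to kho_restore_pages().
 */
static int __maybe_unused kho_example_preserve_buffer(void)
{
	const unsigned long nr_pages = 16;	/* hypothetical size */
	void *buf = alloc_pages_exact(nr_pages * PAGE_SIZE,
				      GFP_KERNEL | __GFP_ZERO);
	int err;

	if (!buf)
		return -ENOMEM;

	err = kho_preserve_pages(virt_to_page(buf), nr_pages);
	if (err)
		free_pages_exact(buf, nr_pages * PAGE_SIZE);
	return err;
}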

/**
 * kho_unpreserve_pages - unpreserve contiguous pages.
 * @page: first page in the range.
 * @nr_pages: number of pages.
 *
 * Instructs KHO to unpreserve @nr_pages contiguous pages starting from @page.
 * This must be called with the same @page and @nr_pages as the corresponding
 * kho_preserve_pages() call. Unpreserving arbitrary sub-ranges of larger
 * preserved blocks is not supported.
 */
void kho_unpreserve_pages(struct page *page, unsigned long nr_pages)
{
	struct kho_mem_track *track = &kho_out.track;
	const unsigned long start_pfn = page_to_pfn(page);
	const unsigned long end_pfn = start_pfn + nr_pages;

	__kho_unpreserve(track, start_pfn, end_pfn);
}
EXPORT_SYMBOL_GPL(kho_unpreserve_pages);

/* vmalloc flags KHO supports */
#define KHO_VMALLOC_SUPPORTED_FLAGS	(VM_ALLOC | VM_ALLOW_HUGE_VMAP)

/* KHO internal flags for vmalloc preservations */
#define KHO_VMALLOC_ALLOC	0x0001
#define KHO_VMALLOC_HUGE_VMAP	0x0002

static unsigned short vmalloc_flags_to_kho(unsigned int vm_flags)
{
	unsigned short kho_flags = 0;

	if (vm_flags & VM_ALLOC)
		kho_flags |= KHO_VMALLOC_ALLOC;
	if (vm_flags & VM_ALLOW_HUGE_VMAP)
		kho_flags |= KHO_VMALLOC_HUGE_VMAP;

	return kho_flags;
}

static unsigned int kho_flags_to_vmalloc(unsigned short kho_flags)
{
	unsigned int vm_flags = 0;

	if (kho_flags & KHO_VMALLOC_ALLOC)
		vm_flags |= VM_ALLOC;
	if (kho_flags & KHO_VMALLOC_HUGE_VMAP)
		vm_flags |= VM_ALLOW_HUGE_VMAP;

	return vm_flags;
}

static struct kho_vmalloc_chunk *new_vmalloc_chunk(struct kho_vmalloc_chunk *cur)
{
	struct kho_vmalloc_chunk *chunk;
	int err;

	chunk = (struct kho_vmalloc_chunk *)get_zeroed_page(GFP_KERNEL);
	if (!chunk)
		return NULL;

	err = kho_preserve_pages(virt_to_page(chunk), 1);
	if (err)
		goto err_free;
	if (cur)
		KHOSER_STORE_PTR(cur->hdr.next, chunk);
	return chunk;

err_free:
	free_page((unsigned long)chunk);
	return NULL;
}

static void kho_vmalloc_unpreserve_chunk(struct kho_vmalloc_chunk *chunk,
					 unsigned short order)
{
	struct kho_mem_track *track = &kho_out.track;
	unsigned long pfn = PHYS_PFN(virt_to_phys(chunk));

	__kho_unpreserve(track, pfn, pfn + 1);

	for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
		pfn = PHYS_PFN(chunk->phys[i]);
		__kho_unpreserve(track, pfn, pfn + (1 << order));
	}
}

/**
 * kho_preserve_vmalloc - preserve memory allocated with vmalloc() across kexec
 * @ptr: pointer to the area in vmalloc address space
 * @preservation: placeholder for preservation metadata
 *
 * Instructs KHO to preserve the area in vmalloc address space at @ptr. The
 * physical pages mapped at @ptr will be preserved and on successful return
 * @preservation will hold the physical address of a structure that describes
 * the preservation.
 *
 * NOTE: Memory allocated with the vmalloc_node() variants cannot be reliably
 * restored on the same node.
 *
 * Return: 0 on success, error code on failure
 */
int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation)
{
	struct kho_vmalloc_chunk *chunk;
	struct vm_struct *vm = find_vm_area(ptr);
	unsigned int order, flags, nr_contig_pages;
	unsigned int idx = 0;
	int err;

	if (!vm)
		return -EINVAL;

	if (vm->flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
		return -EOPNOTSUPP;

	flags = vmalloc_flags_to_kho(vm->flags);
	order = get_vm_area_page_order(vm);

	chunk = new_vmalloc_chunk(NULL);
	if (!chunk)
		return -ENOMEM;
	KHOSER_STORE_PTR(preservation->first, chunk);

	nr_contig_pages = (1 << order);
	for (int i = 0; i < vm->nr_pages; i += nr_contig_pages) {
		phys_addr_t phys = page_to_phys(vm->pages[i]);

		err = kho_preserve_pages(vm->pages[i], nr_contig_pages);
		if (err)
			goto err_free;

		chunk->phys[idx++] = phys;
		if (idx == ARRAY_SIZE(chunk->phys)) {
			chunk = new_vmalloc_chunk(chunk);
			if (!chunk) {
				err = -ENOMEM;
				goto err_free;
			}
			idx = 0;
		}
	}

	preservation->total_pages = vm->nr_pages;
	preservation->flags = flags;
	preservation->order = order;

	return 0;

err_free:
	kho_unpreserve_vmalloc(preservation);
	return err;
}
EXPORT_SYMBOL_GPL(kho_preserve_vmalloc);

/**
 * kho_unpreserve_vmalloc - unpreserve memory allocated with vmalloc()
 * @preservation: preservation metadata returned by kho_preserve_vmalloc()
 *
 * Instructs KHO to unpreserve the area in vmalloc address space that was
 * previously preserved with kho_preserve_vmalloc().
 */
void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation)
{
	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first);

	while (chunk) {
		struct kho_vmalloc_chunk *tmp = chunk;

		kho_vmalloc_unpreserve_chunk(chunk, preservation->order);

		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
		free_page((unsigned long)tmp);
	}
}
EXPORT_SYMBOL_GPL(kho_unpreserve_vmalloc);

/**
 * kho_restore_vmalloc - recreates and populates an area in vmalloc address
 * space from the preserved memory.
 * @preservation: preservation metadata.
 *
 * Recreates an area in vmalloc address space and populates it with memory that
 * was preserved using kho_preserve_vmalloc().
 *
 * Return: pointer to the area in the vmalloc address space, NULL on failure.
 */
void *kho_restore_vmalloc(const struct kho_vmalloc *preservation)
{
	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first);
	unsigned int align, order, shift, vm_flags;
	unsigned long total_pages, contig_pages;
	unsigned long addr, size;
	struct vm_struct *area;
	struct page **pages;
	unsigned int idx = 0;
	int err;

	vm_flags = kho_flags_to_vmalloc(preservation->flags);
	if (vm_flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
		return NULL;

	total_pages = preservation->total_pages;
	pages = kvmalloc_array(total_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;
	order = preservation->order;
	contig_pages = (1 << order);
	shift = PAGE_SHIFT + order;
	align = 1 << shift;

	while (chunk) {
		struct page *page;

		for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
			phys_addr_t phys = chunk->phys[i];

			if (idx + contig_pages > total_pages)
				goto err_free_pages_array;

			page = kho_restore_pages(phys, contig_pages);
			if (!page)
				goto err_free_pages_array;

			for (int j = 0; j < contig_pages; j++)
				pages[idx++] = page + j;
		}

		page = kho_restore_pages(virt_to_phys(chunk), 1);
		if (!page)
			goto err_free_pages_array;
		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
		__free_page(page);
	}

	if (idx != total_pages)
		goto err_free_pages_array;

	area = __get_vm_area_node(total_pages * PAGE_SIZE, align, shift,
				  vm_flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
	if (!area)
		goto err_free_pages_array;

	addr = (unsigned long)area->addr;
	size = get_vm_area_size(area);
	err = vmap_pages_range(addr, addr + size, PAGE_KERNEL, pages, shift);
	if (err)
		goto err_free_vm_area;

	area->nr_pages = total_pages;
	area->pages = pages;

	return area->addr;

err_free_vm_area:
	free_vm_area(area);
err_free_pages_array:
	kvfree(pages);
	return NULL;
}
EXPORT_SYMBOL_GPL(kho_restore_vmalloc);
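
/*
 * Illustrative sketch, not part of the original file: the vmalloc
 * preservation round trip. The struct kho_vmalloc descriptor itself must be
 * handed over out of band (for example inside a preserved sub-FDT); the
 * function name and the 1M size are hypothetical.
 */
static void __maybe_unused kho_example_vmalloc_roundtrip(struct kho_vmalloc *meta)
{
	/* Outgoing kernel: preserve a vmalloc'ed buffer. */
	void *buf = vmalloc(SZ_1M);

	if (!buf)
		return;
	if (kho_preserve_vmalloc(buf, meta)) {
		vfree(buf);
		return;
	}

	/* Incoming kernel: rebuild the mapping from the descriptor. */
	buf = kho_restore_vmalloc(meta);
	if (buf)
		vfree(buf);
}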

/**
 * kho_alloc_preserve - Allocate, zero, and preserve memory.
 * @size: The number of bytes to allocate.
 *
 * Allocates a physically contiguous block of zeroed pages that is large
 * enough to hold @size bytes. The allocated memory is then registered with
 * KHO for preservation across a kexec.
 *
 * Note: The actual allocated size will be rounded up to the nearest
 * power-of-two page boundary.
 *
 * Return: A virtual pointer to the allocated and preserved memory on success,
 * or an ERR_PTR() encoded error on failure.
 */
void *kho_alloc_preserve(size_t size)
{
	struct folio *folio;
	int order, ret;

	if (!size)
		return ERR_PTR(-EINVAL);

	order = get_order(size);
	if (order > MAX_PAGE_ORDER)
		return ERR_PTR(-E2BIG);

	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, order);
	if (!folio)
		return ERR_PTR(-ENOMEM);

	ret = kho_preserve_folio(folio);
	if (ret) {
		folio_put(folio);
		return ERR_PTR(ret);
	}

	return folio_address(folio);
}
EXPORT_SYMBOL_GPL(kho_alloc_preserve);

/**
 * kho_unpreserve_free - Unpreserve and free memory.
 * @mem: Pointer to the memory allocated by kho_alloc_preserve().
 *
 * Unregisters the memory from KHO preservation and frees the underlying
 * pages back to the system. This function should be called to clean up
 * memory allocated with kho_alloc_preserve().
 */
void kho_unpreserve_free(void *mem)
{
	struct folio *folio;

	if (!mem)
		return;

	folio = virt_to_folio(mem);
	kho_unpreserve_folio(folio);
	folio_put(folio);
}
EXPORT_SYMBOL_GPL(kho_unpreserve_free);

/**
 * kho_restore_free - Restore and free memory after kexec.
 * @mem: Pointer to the memory (in the new kernel's address space)
 * that was allocated by the old kernel.
 *
 * This function is intended to be called in the new kernel (post-kexec)
 * to take ownership of and free a memory region that was preserved by the
 * old kernel using kho_alloc_preserve().
 *
 * It first restores the pages from KHO (using their physical address)
 * and then frees the pages back to the new kernel's page allocator.
 */
void kho_restore_free(void *mem)
{
	struct folio *folio;

	if (!mem)
		return;

	folio = kho_restore_folio(__pa(mem));
	if (!WARN_ON(!folio))
		folio_put(folio);
}
EXPORT_SYMBOL_GPL(kho_restore_free);
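
/*
 * Illustrative sketch, not part of the original file: the intended
 * kho_alloc_preserve() lifecycle. The struct and the way its physical
 * address reaches the successor kernel (normally via a sub-FDT) are
 * hypothetical.
 */
struct kho_example_state {
	u64 generation;
};

static void __maybe_unused kho_example_state_lifecycle(phys_addr_t handed_over)
{
	/* Outgoing kernel: allocate, fill, and leave preserved across kexec. */
	struct kho_example_state *state = kho_alloc_preserve(sizeof(*state));

	if (IS_ERR(state))
		return;
	state->generation = 1;

	/*
	 * Incoming kernel: consume the preserved data, then return the
	 * pages to the page allocator.
	 */
	state = phys_to_virt(handed_over);
	kho_restore_free(state);
}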

int kho_finalize(void)
{
	int ret;

	if (!kho_enable)
		return -EOPNOTSUPP;

	guard(mutex)(&kho_out.lock);
	ret = kho_mem_serialize(&kho_out);
	if (ret)
		return ret;

	kho_out.finalized = true;

	return 0;
}

bool kho_finalized(void)
{
	guard(mutex)(&kho_out.lock);
	return kho_out.finalized;
}

struct kho_in {
	phys_addr_t fdt_phys;
	phys_addr_t scratch_phys;
	phys_addr_t mem_map_phys;
	struct kho_debugfs dbg;
};

static struct kho_in kho_in;

static const void *kho_get_fdt(void)
{
	return kho_in.fdt_phys ? phys_to_virt(kho_in.fdt_phys) : NULL;
}

/**
 * is_kho_boot - check if the current kernel was booted via KHO-enabled
 * kexec
 *
 * This function checks if the current kernel was loaded through a kexec
 * operation with KHO enabled, by verifying that a valid KHO FDT
 * was passed.
 *
 * Note: This function returns reliable results only after
 * kho_populate() has been called during early boot. Before that,
 * it may return false even if KHO data is present.
 *
 * Return: true if booted via KHO-enabled kexec, false otherwise
 */
bool is_kho_boot(void)
{
	return !!kho_get_fdt();
}
EXPORT_SYMBOL_GPL(is_kho_boot);

/**
 * kho_retrieve_subtree - retrieve a preserved sub FDT by its name.
 * @name: the name of the sub FDT passed to kho_add_subtree().
 * @phys: if found, the physical address of the sub FDT is stored in @phys.
 *
 * Retrieve a preserved sub FDT named @name and store its physical
 * address in @phys.
 *
 * Return: 0 on success, error code on failure
 */
int kho_retrieve_subtree(const char *name, phys_addr_t *phys)
{
	const void *fdt = kho_get_fdt();
	const u64 *val;
	int offset, len;

	if (!fdt)
		return -ENOENT;

	if (!phys)
		return -EINVAL;

	offset = fdt_subnode_offset(fdt, 0, name);
	if (offset < 0)
		return -ENOENT;

	val = fdt_getprop(fdt, offset, KHO_FDT_SUB_TREE_PROP_NAME, &len);
	if (!val || len != sizeof(*val))
		return -EINVAL;

	*phys = (phys_addr_t)*val;

	return 0;
}
EXPORT_SYMBOL_GPL(kho_retrieve_subtree);
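
/*
 * Illustrative sketch, not part of the original file: the successor-kernel
 * side of the hypothetical "example" node shown after kho_add_subtree()
 * above, using the same made-up compatible string and "state" property.
 */
static int __maybe_unused kho_example_retrieve(u64 *state)
{
	const u64 *prop;
	phys_addr_t phys;
	const void *fdt;
	int err, len;

	err = kho_retrieve_subtree("example", &phys);
	if (err)
		return err;

	fdt = phys_to_virt(phys);
	if (fdt_node_check_compatible(fdt, 0, "example-v1"))
		return -EINVAL;

	prop = fdt_getprop(fdt, 0, "state", &len);
	if (!prop || len != sizeof(*prop))
		return -EINVAL;

	*state = get_unaligned(prop);
	return 0;
}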

static __init int kho_out_fdt_setup(void)
{
	void *root = kho_out.fdt;
	u64 empty_mem_map = 0;
	int err;

	err = fdt_create(root, PAGE_SIZE);
	err |= fdt_finish_reservemap(root);
	err |= fdt_begin_node(root, "");
	err |= fdt_property_string(root, "compatible", KHO_FDT_COMPATIBLE);
	err |= fdt_property(root, KHO_FDT_MEMORY_MAP_PROP_NAME, &empty_mem_map,
			    sizeof(empty_mem_map));
	err |= fdt_end_node(root);
	err |= fdt_finish(root);

	return err;
}

static __init int kho_init(void)
{
	const void *fdt = kho_get_fdt();
	int err = 0;

	if (!kho_enable)
		return 0;

	kho_out.fdt = kho_alloc_preserve(PAGE_SIZE);
	if (IS_ERR(kho_out.fdt)) {
		err = PTR_ERR(kho_out.fdt);
		goto err_free_scratch;
	}

	err = kho_debugfs_init();
	if (err)
		goto err_free_fdt;

	err = kho_out_debugfs_init(&kho_out.dbg);
	if (err)
		goto err_free_fdt;

	err = kho_out_fdt_setup();
	if (err)
		goto err_free_fdt;

	if (fdt) {
		kho_in_debugfs_init(&kho_in.dbg, fdt);
		return 0;
	}

	for (int i = 0; i < kho_scratch_cnt; i++) {
		unsigned long base_pfn = PHYS_PFN(kho_scratch[i].addr);
		unsigned long count = kho_scratch[i].size >> PAGE_SHIFT;
		unsigned long pfn;

		/*
		 * When debug_pagealloc is enabled, __free_pages() clears the
		 * corresponding PRESENT bit in the kernel page table.
		 * Subsequent kmemleak scans of these pages then cause
		 * non-PRESENT page faults.
		 * Mark scratch areas with kmemleak_ignore_phys() to exclude
		 * them from kmemleak scanning.
		 */
		kmemleak_ignore_phys(kho_scratch[i].addr);
		for (pfn = base_pfn; pfn < base_pfn + count;
		     pfn += pageblock_nr_pages)
			init_cma_reserved_pageblock(pfn_to_page(pfn));
	}

	WARN_ON_ONCE(kho_debugfs_fdt_add(&kho_out.dbg, "fdt",
					 kho_out.fdt, true));

	return 0;

err_free_fdt:
	kho_unpreserve_free(kho_out.fdt);
err_free_scratch:
	kho_out.fdt = NULL;
	for (int i = 0; i < kho_scratch_cnt; i++) {
		void *start = __va(kho_scratch[i].addr);
		void *end = start + kho_scratch[i].size;

		free_reserved_area(start, end, -1, "");
	}
	kho_enable = false;
	return err;
}
fs_initcall(kho_init);

static void __init kho_release_scratch(void)
{
	phys_addr_t start, end;
	u64 i;

	memmap_init_kho_scratch_pages();

	/*
	 * Mark scratch mem as CMA before we return it. That way we
	 * ensure that no kernel allocations happen on it. That means
	 * we can reuse it as scratch memory again later.
	 */
	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
			     MEMBLOCK_KHO_SCRATCH, &start, &end, NULL) {
		ulong start_pfn = pageblock_start_pfn(PFN_DOWN(start));
		ulong end_pfn = pageblock_align(PFN_UP(end));
		ulong pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages)
			init_pageblock_migratetype(pfn_to_page(pfn),
						   MIGRATE_CMA, false);
	}
}

void __init kho_memory_init(void)
{
	if (kho_in.mem_map_phys) {
		kho_scratch = phys_to_virt(kho_in.scratch_phys);
		kho_release_scratch();
		kho_mem_deserialize(phys_to_virt(kho_in.mem_map_phys));
	} else {
		kho_reserve_scratch();
	}
}

void __init kho_populate(phys_addr_t fdt_phys, u64 fdt_len,
			 phys_addr_t scratch_phys, u64 scratch_len)
{
	struct kho_scratch *scratch = NULL;
	phys_addr_t mem_map_phys;
	void *fdt = NULL;
	int err = 0;
	unsigned int scratch_cnt = scratch_len / sizeof(*kho_scratch);

	/* Validate the input FDT */
	fdt = early_memremap(fdt_phys, fdt_len);
	if (!fdt) {
		pr_warn("setup: failed to memremap FDT (0x%llx)\n", fdt_phys);
		err = -EFAULT;
		goto out;
	}
	err = fdt_check_header(fdt);
	if (err) {
		pr_warn("setup: handover FDT (0x%llx) is invalid: %d\n",
			fdt_phys, err);
		err = -EINVAL;
		goto out;
	}
	err = fdt_node_check_compatible(fdt, 0, KHO_FDT_COMPATIBLE);
	if (err) {
		pr_warn("setup: handover FDT (0x%llx) is incompatible with '%s': %d\n",
			fdt_phys, KHO_FDT_COMPATIBLE, err);
		err = -EINVAL;
		goto out;
	}

	mem_map_phys = kho_get_mem_map_phys(fdt);
	if (!mem_map_phys) {
		err = -ENOENT;
		goto out;
	}

	scratch = early_memremap(scratch_phys, scratch_len);
	if (!scratch) {
		pr_warn("setup: failed to memremap scratch (phys=0x%llx, len=%llu)\n",
			scratch_phys, scratch_len);
		err = -EFAULT;
		goto out;
	}

	/*
	 * The previous kernel passed us safe contiguous blocks of memory to
	 * use for early boot purposes, so that we can resize the memblock
	 * array as needed.
	 */
	for (int i = 0; i < scratch_cnt; i++) {
		struct kho_scratch *area = &scratch[i];
		u64 size = area->size;

		memblock_add(area->addr, size);
		err = memblock_mark_kho_scratch(area->addr, size);
		if (WARN_ON(err)) {
			pr_warn("failed to mark the scratch region %pa+%pa: %pe\n",
				&area->addr, &size, ERR_PTR(err));
			goto out;
		}
		pr_debug("Marked %pa+%pa as scratch\n", &area->addr, &size);
	}

	memblock_reserve(scratch_phys, scratch_len);

	/*
	 * Now that we have a viable region of scratch memory, let's tell
	 * the memblock allocator to only use that for any allocations.
	 * That way we ensure that nothing scribbles over in-use data while
	 * we initialize the page tables, which we will need to ingest all
	 * memory reservations from the previous kernel.
	 */
	memblock_set_kho_scratch_only();

	kho_in.fdt_phys = fdt_phys;
	kho_in.scratch_phys = scratch_phys;
	kho_in.mem_map_phys = mem_map_phys;
	kho_scratch_cnt = scratch_cnt;
	pr_info("found kexec handover data.\n");

out:
	if (fdt)
		early_memunmap(fdt, fdt_len);
	if (scratch)
		early_memunmap(scratch, scratch_len);
	if (err)
		pr_warn("disabling KHO revival: %d\n", err);
}

/* Helper functions for kexec_file_load */

int kho_fill_kimage(struct kimage *image)
{
	ssize_t scratch_size;
	int err = 0;
	struct kexec_buf scratch;

	if (!kho_enable)
		return 0;

	image->kho.fdt = virt_to_phys(kho_out.fdt);

	scratch_size = sizeof(*kho_scratch) * kho_scratch_cnt;
	scratch = (struct kexec_buf){
		.image = image,
		.buffer = kho_scratch,
		.bufsz = scratch_size,
		.mem = KEXEC_BUF_MEM_UNKNOWN,
		.memsz = scratch_size,
		.buf_align = SZ_64K, /* Makes it easier to map */
		.buf_max = ULONG_MAX,
		.top_down = true,
	};
	err = kexec_add_buffer(&scratch);
	if (err)
		return err;
	image->kho.scratch = &image->segment[image->nr_segments - 1];

	return 0;
}

static int kho_walk_scratch(struct kexec_buf *kbuf,
			    int (*func)(struct resource *, void *))
{
	int ret = 0;
	int i;

	for (i = 0; i < kho_scratch_cnt; i++) {
		struct resource res = {
			.start = kho_scratch[i].addr,
			.end = kho_scratch[i].addr + kho_scratch[i].size - 1,
		};

		/* Try to fit the kimage into our KHO scratch region */
		ret = func(&res, kbuf);
		if (ret)
			break;
	}

	return ret;
}

int kho_locate_mem_hole(struct kexec_buf *kbuf,
			int (*func)(struct resource *, void *))
{
	int ret;

	if (!kho_enable || kbuf->image->type == KEXEC_TYPE_CRASH)
		return 1;

	ret = kho_walk_scratch(kbuf, func);

	return ret == 1 ? 0 : -EADDRNOTAVAIL;
}
1598