xref: /linux/kernel/liveupdate/kexec_handover.c (revision 509d3f45847627f4c5cdce004c3ec79262b5239c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * kexec_handover.c - kexec handover metadata processing
4  * Copyright (C) 2023 Alexander Graf <graf@amazon.com>
5  * Copyright (C) 2025 Microsoft Corporation, Mike Rapoport <rppt@kernel.org>
6  * Copyright (C) 2025 Google LLC, Changyuan Lyu <changyuanl@google.com>
7  * Copyright (C) 2025 Pasha Tatashin <pasha.tatashin@soleen.com>
8  */
9 
10 #define pr_fmt(fmt) "KHO: " fmt
11 
12 #include <linux/cleanup.h>
13 #include <linux/cma.h>
14 #include <linux/kmemleak.h>
15 #include <linux/count_zeros.h>
16 #include <linux/kexec.h>
17 #include <linux/kexec_handover.h>
18 #include <linux/libfdt.h>
19 #include <linux/list.h>
20 #include <linux/memblock.h>
21 #include <linux/page-isolation.h>
22 #include <linux/unaligned.h>
23 #include <linux/vmalloc.h>
24 
25 #include <asm/early_ioremap.h>
26 
27 #include "kexec_handover_internal.h"
28 /*
29  * KHO is tightly coupled with mm init and needs access to some of mm
30  * internal APIs.
31  */
32 #include "../../mm/internal.h"
33 #include "../kexec_internal.h"
35 
36 #define KHO_FDT_COMPATIBLE "kho-v1"
37 #define PROP_PRESERVED_MEMORY_MAP "preserved-memory-map"
38 #define PROP_SUB_FDT "fdt"
39 
40 #define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */
41 
42 /*
43  * KHO uses page->private, which is an unsigned long, to store page metadata.
44  * Use it to store both the magic and the order.
45  */
46 union kho_page_info {
47 	unsigned long page_private;
48 	struct {
49 		unsigned int order;
50 		unsigned int magic;
51 	};
52 };
53 
54 static_assert(sizeof(union kho_page_info) == sizeof(((struct page *)0)->private));
55 
56 static bool kho_enable __ro_after_init = IS_ENABLED(CONFIG_KEXEC_HANDOVER_ENABLE_DEFAULT);
57 
58 bool kho_is_enabled(void)
59 {
60 	return kho_enable;
61 }
62 EXPORT_SYMBOL_GPL(kho_is_enabled);
63 
64 static int __init kho_parse_enable(char *p)
65 {
66 	return kstrtobool(p, &kho_enable);
67 }
68 early_param("kho", kho_parse_enable);
69 
70 /*
71  * Keep track of memory that is to be preserved across KHO.
72  *
73  * The serializing side uses two levels of xarrays to manage chunks of per-order
74  * PAGE_SIZE byte bitmaps. For instance, if PAGE_SIZE = 4096, the entire 1G order
75  * of an 8TB system would fit inside a single 4096 byte bitmap. For order 0
76  * allocations each bitmap will cover 128M of address space. Thus, for 16G of
77  * memory at most 512K of bitmap memory will be needed for order 0.
78  *
79  * This approach is fully incremental: as the serialization progresses, folios
80  * can continue to be aggregated into the tracker. The final step, immediately
81  * prior to kexec, serializes the xarray information into a linked list for the
82  * successor kernel to parse.
83  */
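
/*
 * A worked example of the lookup math used below, assuming PAGE_SIZE == 4096
 * (so PRESERVE_BITS == 32768): preserving the order-0 page at phys 0x40000000
 * results in
 *
 *	pfn      = 0x40000000 >> PAGE_SHIFT  = 0x40000
 *	pfn_high = pfn >> order              = 0x40000
 *	bitmap   = phys_bits xarray slot pfn_high / PRESERVE_BITS = slot 8
 *	bit      = pfn_high % PRESERVE_BITS  = bit 0
 *
 * Each order-0 bitmap page therefore covers 32768 * 4K = 128M of physical
 * address space, which is where the 128M figure above comes from.
 */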
84 
85 #define PRESERVE_BITS (PAGE_SIZE * 8)
86 
87 struct kho_mem_phys_bits {
88 	DECLARE_BITMAP(preserve, PRESERVE_BITS);
89 };
90 
91 static_assert(sizeof(struct kho_mem_phys_bits) == PAGE_SIZE);
92 
93 struct kho_mem_phys {
94 	/*
95 	 * Points to kho_mem_phys_bits, a sparse bitmap array. Each bit tracks
96 	 * one block of 1 << order pages.
97 	 */
98 	struct xarray phys_bits;
99 };
100 
101 struct kho_mem_track {
102 	/* Points to kho_mem_phys, each order gets its own bitmap tree */
103 	struct xarray orders;
104 };
105 
106 struct khoser_mem_chunk;
107 
108 struct kho_out {
109 	void *fdt;
110 	bool finalized;
111 	struct mutex lock; /* protects KHO FDT finalization */
112 
113 	struct kho_mem_track track;
114 	struct kho_debugfs dbg;
115 };
116 
117 static struct kho_out kho_out = {
118 	.lock = __MUTEX_INITIALIZER(kho_out.lock),
119 	.track = {
120 		.orders = XARRAY_INIT(kho_out.track.orders, 0),
121 	},
122 	.finalized = false,
123 };
124 
125 static void *xa_load_or_alloc(struct xarray *xa, unsigned long index)
126 {
127 	void *res = xa_load(xa, index);
128 
129 	if (res)
130 		return res;
131 
132 	void *elm __free(free_page) = (void *)get_zeroed_page(GFP_KERNEL);
133 
134 	if (!elm)
135 		return ERR_PTR(-ENOMEM);
136 
137 	if (WARN_ON(kho_scratch_overlap(virt_to_phys(elm), PAGE_SIZE)))
138 		return ERR_PTR(-EINVAL);
139 
140 	res = xa_cmpxchg(xa, index, NULL, elm, GFP_KERNEL);
141 	if (xa_is_err(res))
142 		return ERR_PTR(xa_err(res));
143 	else if (res)
144 		return res;
145 
146 	return no_free_ptr(elm);
147 }
148 
149 static void __kho_unpreserve_order(struct kho_mem_track *track, unsigned long pfn,
150 				   unsigned int order)
151 {
152 	struct kho_mem_phys_bits *bits;
153 	struct kho_mem_phys *physxa;
154 	const unsigned long pfn_high = pfn >> order;
155 
156 	physxa = xa_load(&track->orders, order);
157 	if (WARN_ON_ONCE(!physxa))
158 		return;
159 
160 	bits = xa_load(&physxa->phys_bits, pfn_high / PRESERVE_BITS);
161 	if (WARN_ON_ONCE(!bits))
162 		return;
163 
164 	clear_bit(pfn_high % PRESERVE_BITS, bits->preserve);
165 }
166 
167 static void __kho_unpreserve(struct kho_mem_track *track, unsigned long pfn,
168 			     unsigned long end_pfn)
169 {
170 	unsigned int order;
171 
172 	while (pfn < end_pfn) {
173 		order = min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
174 
175 		__kho_unpreserve_order(track, pfn, order);
176 
177 		pfn += 1 << order;
178 	}
179 }
180 
181 static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
182 				unsigned int order)
183 {
184 	struct kho_mem_phys_bits *bits;
185 	struct kho_mem_phys *physxa, *new_physxa;
186 	const unsigned long pfn_high = pfn >> order;
187 
188 	might_sleep();
189 	physxa = xa_load(&track->orders, order);
190 	if (!physxa) {
191 		int err;
192 
193 		new_physxa = kzalloc(sizeof(*physxa), GFP_KERNEL);
194 		if (!new_physxa)
195 			return -ENOMEM;
196 
197 		xa_init(&new_physxa->phys_bits);
198 		physxa = xa_cmpxchg(&track->orders, order, NULL, new_physxa,
199 				    GFP_KERNEL);
200 
201 		err = xa_err(physxa);
202 		if (err || physxa) {
203 			xa_destroy(&new_physxa->phys_bits);
204 			kfree(new_physxa);
205 
206 			if (err)
207 				return err;
208 		} else {
209 			physxa = new_physxa;
210 		}
211 	}
212 
213 	bits = xa_load_or_alloc(&physxa->phys_bits, pfn_high / PRESERVE_BITS);
214 	if (IS_ERR(bits))
215 		return PTR_ERR(bits);
216 
217 	set_bit(pfn_high % PRESERVE_BITS, bits->preserve);
218 
219 	return 0;
220 }
221 
222 static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
223 {
224 	struct page *page = pfn_to_online_page(PHYS_PFN(phys));
225 	unsigned int nr_pages, ref_cnt;
226 	union kho_page_info info;
227 
228 	if (!page)
229 		return NULL;
230 
231 	info.page_private = page->private;
232 	/*
233 	 * deserialize_bitmap() only sets the magic on the head page. This magic
234 	 * check also implicitly makes sure phys is order-aligned since for
235 	 * non-order-aligned phys addresses, magic will never be set.
236 	 */
237 	if (WARN_ON_ONCE(info.magic != KHO_PAGE_MAGIC || info.order > MAX_PAGE_ORDER))
238 		return NULL;
239 	nr_pages = (1 << info.order);
240 
241 	/* Clear private to make sure later restores on this page error out. */
242 	page->private = 0;
243 	/* Head page gets refcount of 1. */
244 	set_page_count(page, 1);
245 
246 	/*
247 	 * For higher order folios, tail pages get a page count of zero.
248 	 * For physically contiguous order-0 pages, every page gets a page
249 	 * count of 1.
250 	 */
251 	ref_cnt = is_folio ? 0 : 1;
252 	for (unsigned int i = 1; i < nr_pages; i++)
253 		set_page_count(page + i, ref_cnt);
254 
255 	if (is_folio && info.order)
256 		prep_compound_page(page, info.order);
257 
258 	adjust_managed_page_count(page, nr_pages);
259 	return page;
260 }
261 
262 /**
263  * kho_restore_folio - recreates the folio from the preserved memory.
264  * @phys: physical address of the folio.
265  *
266  * Return: pointer to the struct folio on success, NULL on failure.
267  */
268 struct folio *kho_restore_folio(phys_addr_t phys)
269 {
270 	struct page *page = kho_restore_page(phys, true);
271 
272 	return page ? page_folio(page) : NULL;
273 }
274 EXPORT_SYMBOL_GPL(kho_restore_folio);
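
/*
 * Minimal restore-side sketch: in the successor kernel, a subsystem would
 * typically read the physical address of its preserved folio from its own
 * sub FDT and hand it to kho_restore_folio(). Here sub_fdt stands for that
 * subsystem's sub FDT and the "mem" property name is hypothetical. The
 * returned folio keeps the order it was preserved with.
 *
 *	struct folio *folio;
 *	const u64 *val;
 *	int len;
 *
 *	val = fdt_getprop(sub_fdt, 0, "mem", &len);
 *	if (!val || len != sizeof(*val))
 *		return -EINVAL;
 *	folio = kho_restore_folio((phys_addr_t)*val);
 *	if (!folio)
 *		return -ENOENT;
 */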
275 
276 /**
277  * kho_restore_pages - restore a list of contiguous order 0 pages.
278  * @phys: physical address of the first page.
279  * @nr_pages: number of pages.
280  *
281  * Restore a contiguous list of order 0 pages that was preserved with
282  * kho_preserve_pages().
283  *
284  * Return: pointer to the first struct page on success, NULL on failure.
285  */
286 struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages)
287 {
288 	const unsigned long start_pfn = PHYS_PFN(phys);
289 	const unsigned long end_pfn = start_pfn + nr_pages;
290 	unsigned long pfn = start_pfn;
291 
292 	while (pfn < end_pfn) {
293 		const unsigned int order =
294 			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
295 		struct page *page = kho_restore_page(PFN_PHYS(pfn), false);
296 
297 		if (!page)
298 			return NULL;
299 		pfn += 1 << order;
300 	}
301 
302 	return pfn_to_page(start_pfn);
303 }
304 EXPORT_SYMBOL_GPL(kho_restore_pages);
305 
306 /* Serialize and deserialize struct kho_mem_phys across kexec
307  *
308  * Record all the bitmaps in a linked list of pages for the next kernel to
309  * process. Each chunk holds bitmaps of the same order and each block of bitmaps
310  * starts at a given physical address. This allows the bitmaps to be sparse. The
311  * xarray is used to store them in a tree while building up the data structure,
312  * but the KHO successor kernel only needs to process them once in order.
313  *
314  * All of this memory is normal kmalloc() memory and is not marked for
315  * preservation. The successor kernel will remain isolated to the scratch space
316  * until it completes processing this list. Once processed all the memory
317  * storing these ranges will be marked as free.
318  */
319 
320 struct khoser_mem_bitmap_ptr {
321 	phys_addr_t phys_start;
322 	DECLARE_KHOSER_PTR(bitmap, struct kho_mem_phys_bits *);
323 };
324 
325 struct khoser_mem_chunk_hdr {
326 	DECLARE_KHOSER_PTR(next, struct khoser_mem_chunk *);
327 	unsigned int order;
328 	unsigned int num_elms;
329 };
330 
331 #define KHOSER_BITMAP_SIZE                                   \
332 	((PAGE_SIZE - sizeof(struct khoser_mem_chunk_hdr)) / \
333 	 sizeof(struct khoser_mem_bitmap_ptr))
334 
335 struct khoser_mem_chunk {
336 	struct khoser_mem_chunk_hdr hdr;
337 	struct khoser_mem_bitmap_ptr bitmaps[KHOSER_BITMAP_SIZE];
338 };
339 
340 static_assert(sizeof(struct khoser_mem_chunk) == PAGE_SIZE);
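
/*
 * A short worked example of the phys_start encoding shared by
 * kho_mem_serialize() and deserialize_bitmap(), assuming PAGE_SIZE == 4096:
 * for order 0, the bitmap stored at phys_bits xarray index 8 is recorded as
 *
 *	elm->phys_start = (8 * PRESERVE_BITS) << (0 + PAGE_SHIFT)
 *			= 262144 << 12 = 0x40000000
 *
 * and the successor kernel recovers the address covered by bit b of that
 * bitmap as
 *
 *	phys = elm->phys_start + (b << (0 + PAGE_SHIFT))
 *
 * so bit 0 maps back to the order-0 page at 0x40000000.
 */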
341 
342 static struct khoser_mem_chunk *new_chunk(struct khoser_mem_chunk *cur_chunk,
343 					  unsigned long order)
344 {
345 	struct khoser_mem_chunk *chunk __free(free_page) = NULL;
346 
347 	chunk = (void *)get_zeroed_page(GFP_KERNEL);
348 	if (!chunk)
349 		return ERR_PTR(-ENOMEM);
350 
351 	if (WARN_ON(kho_scratch_overlap(virt_to_phys(chunk), PAGE_SIZE)))
352 		return ERR_PTR(-EINVAL);
353 
354 	chunk->hdr.order = order;
355 	if (cur_chunk)
356 		KHOSER_STORE_PTR(cur_chunk->hdr.next, chunk);
357 	return no_free_ptr(chunk);
358 }
359 
360 static void kho_mem_ser_free(struct khoser_mem_chunk *first_chunk)
361 {
362 	struct khoser_mem_chunk *chunk = first_chunk;
363 
364 	while (chunk) {
365 		struct khoser_mem_chunk *tmp = chunk;
366 
367 		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
368 		free_page((unsigned long)tmp);
369 	}
370 }
371 
372 /*
373  *  Update the memory map property; if an old one is found, discard it via
374  *  kho_mem_ser_free().
375  */
376 static void kho_update_memory_map(struct khoser_mem_chunk *first_chunk)
377 {
378 	void *ptr;
379 	u64 phys;
380 
381 	ptr = fdt_getprop_w(kho_out.fdt, 0, PROP_PRESERVED_MEMORY_MAP, NULL);
382 
383 	/* Check and discard previous memory map */
384 	phys = get_unaligned((u64 *)ptr);
385 	if (phys)
386 		kho_mem_ser_free((struct khoser_mem_chunk *)phys_to_virt(phys));
387 
388 	/* Update with the new value */
389 	phys = first_chunk ? (u64)virt_to_phys(first_chunk) : 0;
390 	put_unaligned(phys, (u64 *)ptr);
391 }
392 
393 static int kho_mem_serialize(struct kho_out *kho_out)
394 {
395 	struct khoser_mem_chunk *first_chunk = NULL;
396 	struct khoser_mem_chunk *chunk = NULL;
397 	struct kho_mem_phys *physxa;
398 	unsigned long order;
399 	int err = -ENOMEM;
400 
401 	xa_for_each(&kho_out->track.orders, order, physxa) {
402 		struct kho_mem_phys_bits *bits;
403 		unsigned long phys;
404 
405 		chunk = new_chunk(chunk, order);
406 		if (IS_ERR(chunk)) {
407 			err = PTR_ERR(chunk);
408 			goto err_free;
409 		}
410 
411 		if (!first_chunk)
412 			first_chunk = chunk;
413 
414 		xa_for_each(&physxa->phys_bits, phys, bits) {
415 			struct khoser_mem_bitmap_ptr *elm;
416 
417 			if (chunk->hdr.num_elms == ARRAY_SIZE(chunk->bitmaps)) {
418 				chunk = new_chunk(chunk, order);
419 				if (IS_ERR(chunk)) {
420 					err = PTR_ERR(chunk);
421 					goto err_free;
422 				}
423 			}
424 
425 			elm = &chunk->bitmaps[chunk->hdr.num_elms];
426 			chunk->hdr.num_elms++;
427 			elm->phys_start = (phys * PRESERVE_BITS)
428 					  << (order + PAGE_SHIFT);
429 			KHOSER_STORE_PTR(elm->bitmap, bits);
430 		}
431 	}
432 
433 	kho_update_memory_map(first_chunk);
434 
435 	return 0;
436 
437 err_free:
438 	kho_mem_ser_free(first_chunk);
439 	return err;
440 }
441 
442 static void __init deserialize_bitmap(unsigned int order,
443 				      struct khoser_mem_bitmap_ptr *elm)
444 {
445 	struct kho_mem_phys_bits *bitmap = KHOSER_LOAD_PTR(elm->bitmap);
446 	unsigned long bit;
447 
448 	for_each_set_bit(bit, bitmap->preserve, PRESERVE_BITS) {
449 		int sz = 1 << (order + PAGE_SHIFT);
450 		phys_addr_t phys =
451 			elm->phys_start + (bit << (order + PAGE_SHIFT));
452 		struct page *page = phys_to_page(phys);
453 		union kho_page_info info;
454 
455 		memblock_reserve(phys, sz);
456 		memblock_reserved_mark_noinit(phys, sz);
457 		info.magic = KHO_PAGE_MAGIC;
458 		info.order = order;
459 		page->private = info.page_private;
460 	}
461 }
462 
463 /* Return true if memory was deserialized */
464 static bool __init kho_mem_deserialize(const void *fdt)
465 {
466 	struct khoser_mem_chunk *chunk;
467 	const void *mem_ptr;
468 	u64 mem;
469 	int len;
470 
471 	mem_ptr = fdt_getprop(fdt, 0, PROP_PRESERVED_MEMORY_MAP, &len);
472 	if (!mem_ptr || len != sizeof(u64)) {
473 		pr_err("failed to get preserved memory bitmaps\n");
474 		return false;
475 	}
476 
477 	mem = get_unaligned((const u64 *)mem_ptr);
478 	chunk = mem ? phys_to_virt(mem) : NULL;
479 
480 	/* No preserved physical pages were passed, no deserialization */
481 	if (!chunk)
482 		return false;
483 
484 	while (chunk) {
485 		unsigned int i;
486 
487 		for (i = 0; i != chunk->hdr.num_elms; i++)
488 			deserialize_bitmap(chunk->hdr.order,
489 					   &chunk->bitmaps[i]);
490 		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
491 	}
492 
493 	return true;
494 }
495 
496 /*
497  * With KHO enabled, memory can become fragmented because KHO regions may
498  * be anywhere in physical address space. The scratch regions give us safe
499  * zones from which we will never see KHO allocations. This is where we can
500  * later safely load our new kexec images and then use the scratch areas for
501  * early allocations that happen before the page allocator is initialized.
503  */
504 struct kho_scratch *kho_scratch;
505 unsigned int kho_scratch_cnt;
506 
507 /*
508  * By default, the scratch areas are scaled as a percentage of the memory
509  * allocated from memblock. A user can override the scale with the parameter:
510  *
511  * kho_scratch=N%
512  *
513  * It is also possible to explicitly define sizes for the lowmem, global and
514  * per-node scratch areas:
515  *
516  * kho_scratch=l[KMG],n[KMG],m[KMG]
517  *
518  * The explicit size definition takes precedence over scale definition.
519  */
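
/*
 * For example (illustrative values only):
 *
 *	kho_scratch=25%		scale all scratch areas to 25% of the memory
 *				reserved from memblock
 *	kho_scratch=512M,2G,1G	512M lowmem, 2G global and 1G per-node scratch
 *
 * as parsed by kho_parse_scratch_size() below.
 */
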
520 static unsigned int scratch_scale __initdata = 200;
521 static phys_addr_t scratch_size_global __initdata;
522 static phys_addr_t scratch_size_pernode __initdata;
523 static phys_addr_t scratch_size_lowmem __initdata;
524 
525 static int __init kho_parse_scratch_size(char *p)
526 {
527 	size_t len;
528 	unsigned long sizes[3];
529 	size_t total_size = 0;
530 	int i;
531 
532 	if (!p)
533 		return -EINVAL;
534 
535 	len = strlen(p);
536 	if (!len)
537 		return -EINVAL;
538 
539 	/* parse nn% */
540 	if (p[len - 1] == '%') {
541 		/* unsigned int max is 4,294,967,295, 10 chars */
542 		char s_scale[11] = {};
543 		int ret = 0;
544 
545 		if (len > ARRAY_SIZE(s_scale))
546 			return -EINVAL;
547 
548 		memcpy(s_scale, p, len - 1);
549 		ret = kstrtouint(s_scale, 10, &scratch_scale);
550 		if (!ret)
551 			pr_notice("scratch scale is %d%%\n", scratch_scale);
552 		return ret;
553 	}
554 
555 	/* parse ll[KMG],mm[KMG],nn[KMG] */
556 	for (i = 0; i < ARRAY_SIZE(sizes); i++) {
557 		char *endp = p;
558 
559 		if (i > 0) {
560 			if (*p != ',')
561 				return -EINVAL;
562 			p += 1;
563 		}
564 
565 		sizes[i] = memparse(p, &endp);
566 		if (endp == p)
567 			return -EINVAL;
568 		p = endp;
569 		total_size += sizes[i];
570 	}
571 
572 	if (!total_size)
573 		return -EINVAL;
574 
575 	/* The string should be fully consumed by now. */
576 	if (*p)
577 		return -EINVAL;
578 
579 	scratch_size_lowmem = sizes[0];
580 	scratch_size_global = sizes[1];
581 	scratch_size_pernode = sizes[2];
582 	scratch_scale = 0;
583 
584 	pr_notice("scratch areas: lowmem: %lluMiB global: %lluMiB pernode: %lldMiB\n",
585 		  (u64)(scratch_size_lowmem >> 20),
586 		  (u64)(scratch_size_global >> 20),
587 		  (u64)(scratch_size_pernode >> 20));
588 
589 	return 0;
590 }
591 early_param("kho_scratch", kho_parse_scratch_size);
592 
593 static void __init scratch_size_update(void)
594 {
595 	phys_addr_t size;
596 
597 	if (!scratch_scale)
598 		return;
599 
600 	size = memblock_reserved_kern_size(ARCH_LOW_ADDRESS_LIMIT,
601 					   NUMA_NO_NODE);
602 	size = size * scratch_scale / 100;
603 	scratch_size_lowmem = round_up(size, CMA_MIN_ALIGNMENT_BYTES);
604 
605 	size = memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE,
606 					   NUMA_NO_NODE);
607 	size = size * scratch_scale / 100 - scratch_size_lowmem;
608 	scratch_size_global = round_up(size, CMA_MIN_ALIGNMENT_BYTES);
609 }
610 
611 static phys_addr_t __init scratch_size_node(int nid)
612 {
613 	phys_addr_t size;
614 
615 	if (scratch_scale) {
616 		size = memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE,
617 						   nid);
618 		size = size * scratch_scale / 100;
619 	} else {
620 		size = scratch_size_pernode;
621 	}
622 
623 	return round_up(size, CMA_MIN_ALIGNMENT_BYTES);
624 }
625 
626 /**
627  * kho_reserve_scratch - Reserve a contiguous chunk of memory for kexec
628  *
629  * With KHO we can preserve arbitrary pages in the system. To ensure we still
630  * have a large contiguous region of memory when we search the physical address
631  * space for target memory, let's make sure we always have a large CMA region
632  * active. This CMA region will only be used for movable pages, which are not a
633  * problem for us during KHO because we can just move them somewhere else.
634  */
635 static void __init kho_reserve_scratch(void)
636 {
637 	phys_addr_t addr, size;
638 	int nid, i = 0;
639 
640 	if (!kho_enable)
641 		return;
642 
643 	scratch_size_update();
644 
645 	/* FIXME: deal with node hot-plug/remove */
646 	kho_scratch_cnt = num_online_nodes() + 2;
647 	size = kho_scratch_cnt * sizeof(*kho_scratch);
648 	kho_scratch = memblock_alloc(size, PAGE_SIZE);
649 	if (!kho_scratch)
650 		goto err_disable_kho;
651 
652 	/*
653 	 * reserve scratch area in low memory for lowmem allocations in the
654 	 * next kernel
655 	 */
656 	size = scratch_size_lowmem;
657 	addr = memblock_phys_alloc_range(size, CMA_MIN_ALIGNMENT_BYTES, 0,
658 					 ARCH_LOW_ADDRESS_LIMIT);
659 	if (!addr)
660 		goto err_free_scratch_desc;
661 
662 	kho_scratch[i].addr = addr;
663 	kho_scratch[i].size = size;
664 	i++;
665 
666 	/* reserve large contiguous area for allocations without nid */
667 	size = scratch_size_global;
668 	addr = memblock_phys_alloc(size, CMA_MIN_ALIGNMENT_BYTES);
669 	if (!addr)
670 		goto err_free_scratch_areas;
671 
672 	kho_scratch[i].addr = addr;
673 	kho_scratch[i].size = size;
674 	i++;
675 
676 	for_each_online_node(nid) {
677 		size = scratch_size_node(nid);
678 		addr = memblock_alloc_range_nid(size, CMA_MIN_ALIGNMENT_BYTES,
679 						0, MEMBLOCK_ALLOC_ACCESSIBLE,
680 						nid, true);
681 		if (!addr)
682 			goto err_free_scratch_areas;
683 
684 		kho_scratch[i].addr = addr;
685 		kho_scratch[i].size = size;
686 		i++;
687 	}
688 
689 	return;
690 
691 err_free_scratch_areas:
692 	for (i--; i >= 0; i--)
693 		memblock_phys_free(kho_scratch[i].addr, kho_scratch[i].size);
694 err_free_scratch_desc:
695 	memblock_free(kho_scratch, kho_scratch_cnt * sizeof(*kho_scratch));
696 err_disable_kho:
697 	pr_warn("Failed to reserve scratch area, disabling kexec handover\n");
698 	kho_enable = false;
699 }
700 
701 /**
702  * kho_add_subtree - record the physical address of a sub FDT in KHO root tree.
703  * @name: name of the sub tree.
704  * @fdt: the sub tree blob.
705  *
706  * Creates a new child node named @name in KHO root FDT and records
707  * the physical address of @fdt. The pages of @fdt must also be preserved
708  * by KHO for the new kernel to retrieve it after kexec.
709  *
710  * A debugfs blob entry is also created at
711  * ``/sys/kernel/debug/kho/out/sub_fdts/@name`` when the kernel is configured
712  * with CONFIG_KEXEC_HANDOVER_DEBUGFS.
713  *
714  * Return: 0 on success, error code on failure
715  */
716 int kho_add_subtree(const char *name, void *fdt)
717 {
718 	phys_addr_t phys = virt_to_phys(fdt);
719 	void *root_fdt = kho_out.fdt;
720 	int err = -ENOMEM;
721 	int off, fdt_err;
722 
723 	guard(mutex)(&kho_out.lock);
724 
725 	fdt_err = fdt_open_into(root_fdt, root_fdt, PAGE_SIZE);
726 	if (fdt_err < 0)
727 		return err;
728 
729 	off = fdt_add_subnode(root_fdt, 0, name);
730 	if (off < 0) {
731 		if (off == -FDT_ERR_EXISTS)
732 			err = -EEXIST;
733 		goto out_pack;
734 	}
735 
736 	err = fdt_setprop(root_fdt, off, PROP_SUB_FDT, &phys, sizeof(phys));
737 	if (err < 0)
738 		goto out_pack;
739 
740 	WARN_ON_ONCE(kho_debugfs_fdt_add(&kho_out.dbg, name, fdt, false));
741 
742 out_pack:
743 	fdt_pack(root_fdt);
744 
745 	return err;
746 }
747 EXPORT_SYMBOL_GPL(kho_add_subtree);
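
/*
 * Typical producer-side sketch: a subsystem builds its own sub FDT in
 * KHO-preserved memory (kho_alloc_preserve() takes care of preserving the
 * page backing the FDT), records its state in it and registers it with
 * kho_add_subtree(). The "foo" names, the "mem" property and data_phys are
 * hypothetical.
 *
 *	void *fdt = kho_alloc_preserve(PAGE_SIZE);
 *	u64 mem = data_phys;	// phys addr of data preserved elsewhere
 *	int err;
 *
 *	if (IS_ERR(fdt))
 *		return PTR_ERR(fdt);
 *	err = fdt_create(fdt, PAGE_SIZE);
 *	err |= fdt_finish_reservemap(fdt);
 *	err |= fdt_begin_node(fdt, "");
 *	err |= fdt_property_string(fdt, "compatible", "foo-v1");
 *	err |= fdt_property(fdt, "mem", &mem, sizeof(mem));
 *	err |= fdt_end_node(fdt);
 *	err |= fdt_finish(fdt);
 *	if (!err)
 *		err = kho_add_subtree("foo", fdt);
 *	if (err)
 *		kho_unpreserve_free(fdt);
 */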
748 
749 void kho_remove_subtree(void *fdt)
750 {
751 	phys_addr_t target_phys = virt_to_phys(fdt);
752 	void *root_fdt = kho_out.fdt;
753 	int off;
754 	int err;
755 
756 	guard(mutex)(&kho_out.lock);
757 
758 	err = fdt_open_into(root_fdt, root_fdt, PAGE_SIZE);
759 	if (err < 0)
760 		return;
761 
762 	for (off = fdt_first_subnode(root_fdt, 0); off >= 0;
763 	     off = fdt_next_subnode(root_fdt, off)) {
764 		const u64 *val;
765 		int len;
766 
767 		val = fdt_getprop(root_fdt, off, PROP_SUB_FDT, &len);
768 		if (!val || len != sizeof(phys_addr_t))
769 			continue;
770 
771 		if ((phys_addr_t)*val == target_phys) {
772 			fdt_del_node(root_fdt, off);
773 			kho_debugfs_fdt_remove(&kho_out.dbg, fdt);
774 			break;
775 		}
776 	}
777 
778 	fdt_pack(root_fdt);
779 }
780 EXPORT_SYMBOL_GPL(kho_remove_subtree);
781 
782 /**
783  * kho_preserve_folio - preserve a folio across kexec.
784  * @folio: folio to preserve.
785  *
786  * Instructs KHO to preserve the whole folio across kexec. The order
787  * will be preserved as well.
788  *
789  * Return: 0 on success, error code on failure
790  */
791 int kho_preserve_folio(struct folio *folio)
792 {
793 	const unsigned long pfn = folio_pfn(folio);
794 	const unsigned int order = folio_order(folio);
795 	struct kho_mem_track *track = &kho_out.track;
796 
797 	if (WARN_ON(kho_scratch_overlap(pfn << PAGE_SHIFT, PAGE_SIZE << order)))
798 		return -EINVAL;
799 
800 	return __kho_preserve_order(track, pfn, order);
801 }
802 EXPORT_SYMBOL_GPL(kho_preserve_folio);
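
/*
 * Minimal preserve-side sketch: before kexec, a subsystem preserves a folio
 * and records its physical address somewhere the successor kernel can find
 * it, typically as a property in its sub FDT (see kho_add_subtree()). The
 * order used here is arbitrary.
 *
 *	struct folio *folio = folio_alloc(GFP_KERNEL, 2);
 *	int err;
 *
 *	if (!folio)
 *		return -ENOMEM;
 *	err = kho_preserve_folio(folio);
 *	if (err) {
 *		folio_put(folio);
 *		return err;
 *	}
 *	// publish PFN_PHYS(folio_pfn(folio)) in the sub FDT
 */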
803 
804 /**
805  * kho_unpreserve_folio - unpreserve a folio.
806  * @folio: folio to unpreserve.
807  *
808  * Instructs KHO to unpreserve a folio that was preserved by
809  * kho_preserve_folio() before. The provided @folio (pfn and order)
810  * must exactly match a previously preserved folio.
811  */
812 void kho_unpreserve_folio(struct folio *folio)
813 {
814 	const unsigned long pfn = folio_pfn(folio);
815 	const unsigned int order = folio_order(folio);
816 	struct kho_mem_track *track = &kho_out.track;
817 
818 	__kho_unpreserve_order(track, pfn, order);
819 }
820 EXPORT_SYMBOL_GPL(kho_unpreserve_folio);
821 
822 /**
823  * kho_preserve_pages - preserve contiguous pages across kexec
824  * @page: first page in the list.
825  * @nr_pages: number of pages.
826  *
827  * Preserve a contiguous list of order 0 pages. Must be restored using
828  * kho_restore_pages() to ensure the pages are restored properly as order 0.
829  *
830  * Return: 0 on success, error code on failure
831  */
832 int kho_preserve_pages(struct page *page, unsigned int nr_pages)
833 {
834 	struct kho_mem_track *track = &kho_out.track;
835 	const unsigned long start_pfn = page_to_pfn(page);
836 	const unsigned long end_pfn = start_pfn + nr_pages;
837 	unsigned long pfn = start_pfn;
838 	unsigned long failed_pfn = 0;
839 	int err = 0;
840 
841 	if (WARN_ON(kho_scratch_overlap(start_pfn << PAGE_SHIFT,
842 					nr_pages << PAGE_SHIFT))) {
843 		return -EINVAL;
844 	}
845 
846 	while (pfn < end_pfn) {
847 		const unsigned int order =
848 			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
849 
850 		err = __kho_preserve_order(track, pfn, order);
851 		if (err) {
852 			failed_pfn = pfn;
853 			break;
854 		}
855 
856 		pfn += 1 << order;
857 	}
858 
859 	if (err)
860 		__kho_unpreserve(track, start_pfn, failed_pfn);
861 
862 	return err;
863 }
864 EXPORT_SYMBOL_GPL(kho_preserve_pages);
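
/*
 * Sketch of the order-0 pages variant (buf, nr_pages and the way phys is
 * handed over are hypothetical): the same base address and page count must
 * be used on both sides of the kexec.
 *
 *	// old kernel
 *	err = kho_preserve_pages(virt_to_page(buf), nr_pages);
 *
 *	// new kernel, phys and nr_pages recovered from a sub FDT
 *	struct page *first = kho_restore_pages(phys, nr_pages);
 *
 *	if (!first)
 *		return -ENOENT;
 *	buf = page_to_virt(first);
 */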
865 
866 /**
867  * kho_unpreserve_pages - unpreserve contiguous pages.
868  * @page: first page in the list.
869  * @nr_pages: number of pages.
870  *
871  * Instructs KHO to unpreserve @nr_pages contiguous pages starting from @page.
872  * This must be called with the same @page and @nr_pages as the corresponding
873  * kho_preserve_pages() call. Unpreserving arbitrary sub-ranges of larger
874  * preserved blocks is not supported.
875  */
876 void kho_unpreserve_pages(struct page *page, unsigned int nr_pages)
877 {
878 	struct kho_mem_track *track = &kho_out.track;
879 	const unsigned long start_pfn = page_to_pfn(page);
880 	const unsigned long end_pfn = start_pfn + nr_pages;
881 
882 	__kho_unpreserve(track, start_pfn, end_pfn);
883 }
884 EXPORT_SYMBOL_GPL(kho_unpreserve_pages);
885 
886 struct kho_vmalloc_hdr {
887 	DECLARE_KHOSER_PTR(next, struct kho_vmalloc_chunk *);
888 };
889 
890 #define KHO_VMALLOC_SIZE				\
891 	((PAGE_SIZE - sizeof(struct kho_vmalloc_hdr)) / \
892 	 sizeof(phys_addr_t))
893 
894 struct kho_vmalloc_chunk {
895 	struct kho_vmalloc_hdr hdr;
896 	phys_addr_t phys[KHO_VMALLOC_SIZE];
897 };
898 
899 static_assert(sizeof(struct kho_vmalloc_chunk) == PAGE_SIZE);
900 
901 /* vmalloc flags KHO supports */
902 #define KHO_VMALLOC_SUPPORTED_FLAGS	(VM_ALLOC | VM_ALLOW_HUGE_VMAP)
903 
904 /* KHO internal flags for vmalloc preservations */
905 #define KHO_VMALLOC_ALLOC	0x0001
906 #define KHO_VMALLOC_HUGE_VMAP	0x0002
907 
908 static unsigned short vmalloc_flags_to_kho(unsigned int vm_flags)
909 {
910 	unsigned short kho_flags = 0;
911 
912 	if (vm_flags & VM_ALLOC)
913 		kho_flags |= KHO_VMALLOC_ALLOC;
914 	if (vm_flags & VM_ALLOW_HUGE_VMAP)
915 		kho_flags |= KHO_VMALLOC_HUGE_VMAP;
916 
917 	return kho_flags;
918 }
919 
920 static unsigned int kho_flags_to_vmalloc(unsigned short kho_flags)
921 {
922 	unsigned int vm_flags = 0;
923 
924 	if (kho_flags & KHO_VMALLOC_ALLOC)
925 		vm_flags |= VM_ALLOC;
926 	if (kho_flags & KHO_VMALLOC_HUGE_VMAP)
927 		vm_flags |= VM_ALLOW_HUGE_VMAP;
928 
929 	return vm_flags;
930 }
931 
932 static struct kho_vmalloc_chunk *new_vmalloc_chunk(struct kho_vmalloc_chunk *cur)
933 {
934 	struct kho_vmalloc_chunk *chunk;
935 	int err;
936 
937 	chunk = (struct kho_vmalloc_chunk *)get_zeroed_page(GFP_KERNEL);
938 	if (!chunk)
939 		return NULL;
940 
941 	err = kho_preserve_pages(virt_to_page(chunk), 1);
942 	if (err)
943 		goto err_free;
944 	if (cur)
945 		KHOSER_STORE_PTR(cur->hdr.next, chunk);
946 	return chunk;
947 
948 err_free:
949 	free_page((unsigned long)chunk);
950 	return NULL;
951 }
952 
953 static void kho_vmalloc_unpreserve_chunk(struct kho_vmalloc_chunk *chunk,
954 					 unsigned short order)
955 {
956 	struct kho_mem_track *track = &kho_out.track;
957 	unsigned long pfn = PHYS_PFN(virt_to_phys(chunk));
958 
959 	__kho_unpreserve(track, pfn, pfn + 1);
960 
961 	for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
962 		pfn = PHYS_PFN(chunk->phys[i]);
963 		__kho_unpreserve(track, pfn, pfn + (1 << order));
964 	}
965 }
966 
967 /**
968  * kho_preserve_vmalloc - preserve memory allocated with vmalloc() across kexec
969  * @ptr: pointer to the area in vmalloc address space
970  * @preservation: placeholder for preservation metadata
971  *
972  * Instructs KHO to preserve the area in vmalloc address space at @ptr. The
973  * physical pages mapped at @ptr will be preserved and on successful return
974  * @preservation will hold the physical address of a structure that describes
975  * the preservation.
976  *
977  * NOTE: Memory allocated with vmalloc_node() variants cannot be reliably
978  * restored on the same NUMA node.
979  *
980  * Return: 0 on success, error code on failure
981  */
982 int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation)
983 {
984 	struct kho_vmalloc_chunk *chunk;
985 	struct vm_struct *vm = find_vm_area(ptr);
986 	unsigned int order, flags, nr_contig_pages;
987 	unsigned int idx = 0;
988 	int err;
989 
990 	if (!vm)
991 		return -EINVAL;
992 
993 	if (vm->flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
994 		return -EOPNOTSUPP;
995 
996 	flags = vmalloc_flags_to_kho(vm->flags);
997 	order = get_vm_area_page_order(vm);
998 
999 	chunk = new_vmalloc_chunk(NULL);
1000 	if (!chunk)
1001 		return -ENOMEM;
1002 	KHOSER_STORE_PTR(preservation->first, chunk);
1003 
1004 	nr_contig_pages = (1 << order);
1005 	for (int i = 0; i < vm->nr_pages; i += nr_contig_pages) {
1006 		phys_addr_t phys = page_to_phys(vm->pages[i]);
1007 
1008 		err = kho_preserve_pages(vm->pages[i], nr_contig_pages);
1009 		if (err)
1010 			goto err_free;
1011 
1012 		chunk->phys[idx++] = phys;
1013 		if (idx == ARRAY_SIZE(chunk->phys)) {
1014 			chunk = new_vmalloc_chunk(chunk);
1015 			if (!chunk)
1016 				goto err_free;
1017 			idx = 0;
1018 		}
1019 	}
1020 
1021 	preservation->total_pages = vm->nr_pages;
1022 	preservation->flags = flags;
1023 	preservation->order = order;
1024 
1025 	return 0;
1026 
1027 err_free:
1028 	kho_unpreserve_vmalloc(preservation);
1029 	return err;
1030 }
1031 EXPORT_SYMBOL_GPL(kho_preserve_vmalloc);
1032 
1033 /**
1034  * kho_unpreserve_vmalloc - unpreserve memory allocated with vmalloc()
1035  * @preservation: preservation metadata returned by kho_preserve_vmalloc()
1036  *
1037  * Instructs KHO to unpreserve the area in vmalloc address space that was
1038  * previously preserved with kho_preserve_vmalloc().
1039  */
1040 void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation)
1041 {
1042 	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first);
1043 
1044 	while (chunk) {
1045 		struct kho_vmalloc_chunk *tmp = chunk;
1046 
1047 		kho_vmalloc_unpreserve_chunk(chunk, preservation->order);
1048 
1049 		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
1050 		free_page((unsigned long)tmp);
1051 	}
1052 }
1053 EXPORT_SYMBOL_GPL(kho_unpreserve_vmalloc);
1054 
1055 /**
1056  * kho_restore_vmalloc - recreates and populates an area in vmalloc address
1057  * space from the preserved memory.
1058  * @preservation: preservation metadata.
1059  *
1060  * Recreates an area in vmalloc address space and populates it with memory that
1061  * was preserved using kho_preserve_vmalloc().
1062  *
1063  * Return: pointer to the area in the vmalloc address space, NULL on failure.
1064  */
1065 void *kho_restore_vmalloc(const struct kho_vmalloc *preservation)
1066 {
1067 	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first);
1068 	unsigned int align, order, shift, vm_flags;
1069 	unsigned long total_pages, contig_pages;
1070 	unsigned long addr, size;
1071 	struct vm_struct *area;
1072 	struct page **pages;
1073 	unsigned int idx = 0;
1074 	int err;
1075 
1076 	vm_flags = kho_flags_to_vmalloc(preservation->flags);
1077 	if (vm_flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
1078 		return NULL;
1079 
1080 	total_pages = preservation->total_pages;
1081 	pages = kvmalloc_array(total_pages, sizeof(*pages), GFP_KERNEL);
1082 	if (!pages)
1083 		return NULL;
1084 	order = preservation->order;
1085 	contig_pages = (1 << order);
1086 	shift = PAGE_SHIFT + order;
1087 	align = 1 << shift;
1088 
1089 	while (chunk) {
1090 		struct page *page;
1091 
1092 		for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
1093 			phys_addr_t phys = chunk->phys[i];
1094 
1095 			if (idx + contig_pages > total_pages)
1096 				goto err_free_pages_array;
1097 
1098 			page = kho_restore_pages(phys, contig_pages);
1099 			if (!page)
1100 				goto err_free_pages_array;
1101 
1102 			for (int j = 0; j < contig_pages; j++)
1103 				pages[idx++] = page + j;
1104 
1105 			phys += contig_pages * PAGE_SIZE;
1106 		}
1107 
1108 		page = kho_restore_pages(virt_to_phys(chunk), 1);
1109 		if (!page)
1110 			goto err_free_pages_array;
1111 		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
1112 		__free_page(page);
1113 	}
1114 
1115 	if (idx != total_pages)
1116 		goto err_free_pages_array;
1117 
1118 	area = __get_vm_area_node(total_pages * PAGE_SIZE, align, shift,
1119 				  vm_flags, VMALLOC_START, VMALLOC_END,
1120 				  NUMA_NO_NODE, GFP_KERNEL,
1121 				  __builtin_return_address(0));
1122 	if (!area)
1123 		goto err_free_pages_array;
1124 
1125 	addr = (unsigned long)area->addr;
1126 	size = get_vm_area_size(area);
1127 	err = vmap_pages_range(addr, addr + size, PAGE_KERNEL, pages, shift);
1128 	if (err)
1129 		goto err_free_vm_area;
1130 
1131 	area->nr_pages = total_pages;
1132 	area->pages = pages;
1133 
1134 	return area->addr;
1135 
1136 err_free_vm_area:
1137 	free_vm_area(area);
1138 err_free_pages_array:
1139 	kvfree(pages);
1140 	return NULL;
1141 }
1142 EXPORT_SYMBOL_GPL(kho_restore_vmalloc);
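
/*
 * Sketch of a vmalloc round trip. The struct kho_vmalloc descriptor itself
 * must reach the successor kernel by other means, for instance as a property
 * of a preserved sub FDT; that copy step is only indicated by a comment.
 *
 *	// old kernel
 *	struct kho_vmalloc kv;
 *	void *buf = vmalloc(SZ_1M);
 *	int err = kho_preserve_vmalloc(buf, &kv);
 *
 *	if (err)
 *		return err;
 *	// copy kv into the subsystem's sub FDT
 *
 *	// new kernel, kv read back from the sub FDT
 *	void *buf = kho_restore_vmalloc(&kv);
 *
 *	if (!buf)
 *		return -ENOENT;
 */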
1143 
1144 /**
1145  * kho_alloc_preserve - Allocate, zero, and preserve memory.
1146  * @size: The number of bytes to allocate.
1147  *
1148  * Allocates a physically contiguous block of zeroed pages that is large
1149  * enough to hold @size bytes. The allocated memory is then registered with
1150  * KHO for preservation across a kexec.
1151  *
1152  * Note: The actual allocated size will be rounded up to the next
1153  * power-of-two number of pages.
1154  *
1155  * Return: A virtual pointer to the allocated and preserved memory on success,
1156  * or an ERR_PTR() encoded error on failure.
1157  */
1158 void *kho_alloc_preserve(size_t size)
1159 {
1160 	struct folio *folio;
1161 	int order, ret;
1162 
1163 	if (!size)
1164 		return ERR_PTR(-EINVAL);
1165 
1166 	order = get_order(size);
1167 	if (order > MAX_PAGE_ORDER)
1168 		return ERR_PTR(-E2BIG);
1169 
1170 	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, order);
1171 	if (!folio)
1172 		return ERR_PTR(-ENOMEM);
1173 
1174 	ret = kho_preserve_folio(folio);
1175 	if (ret) {
1176 		folio_put(folio);
1177 		return ERR_PTR(ret);
1178 	}
1179 
1180 	return folio_address(folio);
1181 }
1182 EXPORT_SYMBOL_GPL(kho_alloc_preserve);
1183 
1184 /**
1185  * kho_unpreserve_free - Unpreserve and free memory.
1186  * @mem:  Pointer to the memory allocated by kho_alloc_preserve().
1187  *
1188  * Unregisters the memory from KHO preservation and frees the underlying
1189  * pages back to the system. This function should be called to clean up
1190  * memory allocated with kho_alloc_preserve().
1191  */
1192 void kho_unpreserve_free(void *mem)
1193 {
1194 	struct folio *folio;
1195 
1196 	if (!mem)
1197 		return;
1198 
1199 	folio = virt_to_folio(mem);
1200 	kho_unpreserve_folio(folio);
1201 	folio_put(folio);
1202 }
1203 EXPORT_SYMBOL_GPL(kho_unpreserve_free);
1204 
1205 /**
1206  * kho_restore_free - Restore and free memory after kexec.
1207  * @mem:  Pointer to the memory (in the new kernel's address space)
1208  * that was allocated by the old kernel.
1209  *
1210  * This function is intended to be called in the new kernel (post-kexec)
1211  * to take ownership of and free a memory region that was preserved by the
1212  * old kernel using kho_alloc_preserve().
1213  *
1214  * It first restores the pages from KHO (using their physical address)
1215  * and then frees the pages back to the new kernel's page allocator.
1216  */
1217 void kho_restore_free(void *mem)
1218 {
1219 	struct folio *folio;
1220 
1221 	if (!mem)
1222 		return;
1223 
1224 	folio = kho_restore_folio(__pa(mem));
1225 	if (!WARN_ON(!folio))
1226 		folio_put(folio);
1227 }
1228 EXPORT_SYMBOL_GPL(kho_restore_free);
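
/*
 * The three helpers above are meant to be used together; a minimal
 * lifecycle of a preserved allocation looks like this (struct foo_state
 * and state_phys are hypothetical):
 *
 *	// old kernel
 *	struct foo_state *state = kho_alloc_preserve(sizeof(*state));
 *
 *	if (IS_ERR(state))
 *		return PTR_ERR(state);
 *	// fill in state, publish virt_to_phys(state) via a sub FDT
 *
 *	// old kernel, only if the preservation is abandoned before kexec
 *	kho_unpreserve_free(state);
 *
 *	// new kernel, once the preserved contents have been consumed
 *	kho_restore_free(phys_to_virt(state_phys));
 */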
1229 
1230 int kho_finalize(void)
1231 {
1232 	int ret;
1233 
1234 	if (!kho_enable)
1235 		return -EOPNOTSUPP;
1236 
1237 	guard(mutex)(&kho_out.lock);
1238 	ret = kho_mem_serialize(&kho_out);
1239 	if (ret)
1240 		return ret;
1241 
1242 	kho_out.finalized = true;
1243 
1244 	return 0;
1245 }
1246 
1247 bool kho_finalized(void)
1248 {
1249 	guard(mutex)(&kho_out.lock);
1250 	return kho_out.finalized;
1251 }
1252 
1253 struct kho_in {
1254 	phys_addr_t fdt_phys;
1255 	phys_addr_t scratch_phys;
1256 	struct kho_debugfs dbg;
1257 };
1258 
1259 static struct kho_in kho_in = {
1260 };
1261 
1262 static const void *kho_get_fdt(void)
1263 {
1264 	return kho_in.fdt_phys ? phys_to_virt(kho_in.fdt_phys) : NULL;
1265 }
1266 
1267 /**
1268  * is_kho_boot - check if current kernel was booted via KHO-enabled
1269  * kexec
1270  *
1271  * This function checks if the current kernel was loaded through a kexec
1272  * operation with KHO enabled, by verifying that a valid KHO FDT
1273  * was passed.
1274  *
1275  * Note: This function returns reliable results only after
1276  * kho_populate() has been called during early boot. Before that,
1277  * it may return false even if KHO data is present.
1278  *
1279  * Return: true if booted via KHO-enabled kexec, false otherwise
1280  */
1281 bool is_kho_boot(void)
1282 {
1283 	return !!kho_get_fdt();
1284 }
1285 EXPORT_SYMBOL_GPL(is_kho_boot);
1286 
1287 /**
1288  * kho_retrieve_subtree - retrieve a preserved sub FDT by its name.
1289  * @name: the name of the sub FDT passed to kho_add_subtree().
1290  * @phys: if found, the physical address of the sub FDT is stored in @phys.
1291  *
1292  * Retrieve a preserved sub FDT named @name and store its physical
1293  * address in @phys.
1294  *
1295  * Return: 0 on success, error code on failure
1296  */
1297 int kho_retrieve_subtree(const char *name, phys_addr_t *phys)
1298 {
1299 	const void *fdt = kho_get_fdt();
1300 	const u64 *val;
1301 	int offset, len;
1302 
1303 	if (!fdt)
1304 		return -ENOENT;
1305 
1306 	if (!phys)
1307 		return -EINVAL;
1308 
1309 	offset = fdt_subnode_offset(fdt, 0, name);
1310 	if (offset < 0)
1311 		return -ENOENT;
1312 
1313 	val = fdt_getprop(fdt, offset, PROP_SUB_FDT, &len);
1314 	if (!val || len != sizeof(*val))
1315 		return -EINVAL;
1316 
1317 	*phys = (phys_addr_t)*val;
1318 
1319 	return 0;
1320 }
1321 EXPORT_SYMBOL_GPL(kho_retrieve_subtree);
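
/*
 * Consumer-side sketch matching the kho_add_subtree() example earlier: the
 * successor kernel looks up its sub FDT by name and reads its properties
 * back (the "foo" node, "foo-v1" compatible and "mem" property are
 * hypothetical):
 *
 *	phys_addr_t fdt_phys;
 *	const void *fdt;
 *	const u64 *val;
 *	int len;
 *
 *	if (kho_retrieve_subtree("foo", &fdt_phys))
 *		return -ENOENT;
 *	fdt = phys_to_virt(fdt_phys);
 *	if (fdt_node_check_compatible(fdt, 0, "foo-v1"))
 *		return -EINVAL;
 *	val = fdt_getprop(fdt, 0, "mem", &len);
 *	if (!val || len != sizeof(*val))
 *		return -EINVAL;
 */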
1322 
1323 static __init int kho_out_fdt_setup(void)
1324 {
1325 	void *root = kho_out.fdt;
1326 	u64 empty_mem_map = 0;
1327 	int err;
1328 
1329 	err = fdt_create(root, PAGE_SIZE);
1330 	err |= fdt_finish_reservemap(root);
1331 	err |= fdt_begin_node(root, "");
1332 	err |= fdt_property_string(root, "compatible", KHO_FDT_COMPATIBLE);
1333 	err |= fdt_property(root, PROP_PRESERVED_MEMORY_MAP, &empty_mem_map,
1334 			    sizeof(empty_mem_map));
1335 	err |= fdt_end_node(root);
1336 	err |= fdt_finish(root);
1337 
1338 	return err;
1339 }
1340 
1341 static __init int kho_init(void)
1342 {
1343 	const void *fdt = kho_get_fdt();
1344 	int err = 0;
1345 
1346 	if (!kho_enable)
1347 		return 0;
1348 
1349 	kho_out.fdt = kho_alloc_preserve(PAGE_SIZE);
1350 	if (IS_ERR(kho_out.fdt)) {
1351 		err = PTR_ERR(kho_out.fdt);
1352 		goto err_free_scratch;
1353 	}
1354 
1355 	err = kho_debugfs_init();
1356 	if (err)
1357 		goto err_free_fdt;
1358 
1359 	err = kho_out_debugfs_init(&kho_out.dbg);
1360 	if (err)
1361 		goto err_free_fdt;
1362 
1363 	err = kho_out_fdt_setup();
1364 	if (err)
1365 		goto err_free_fdt;
1366 
1367 	if (fdt) {
1368 		kho_in_debugfs_init(&kho_in.dbg, fdt);
1369 		return 0;
1370 	}
1371 
1372 	for (int i = 0; i < kho_scratch_cnt; i++) {
1373 		unsigned long base_pfn = PHYS_PFN(kho_scratch[i].addr);
1374 		unsigned long count = kho_scratch[i].size >> PAGE_SHIFT;
1375 		unsigned long pfn;
1376 
1377 		/*
1378 		 * When debug_pagealloc is enabled, __free_pages() clears the
1379 		 * corresponding PRESENT bit in the kernel page table.
1380 		 * Subsequent kmemleak scans of these pages cause the
1381 		 * Subsequent kmemleak scans of these pages cause
1382 		 * Mark scratch areas with kmemleak_ignore_phys() to exclude
1383 		 * them from kmemleak scanning.
1384 		 */
1385 		kmemleak_ignore_phys(kho_scratch[i].addr);
1386 		for (pfn = base_pfn; pfn < base_pfn + count;
1387 		     pfn += pageblock_nr_pages)
1388 			init_cma_reserved_pageblock(pfn_to_page(pfn));
1389 	}
1390 
1391 	WARN_ON_ONCE(kho_debugfs_fdt_add(&kho_out.dbg, "fdt",
1392 					 kho_out.fdt, true));
1393 
1394 	return 0;
1395 
1396 err_free_fdt:
1397 	kho_unpreserve_free(kho_out.fdt);
1398 err_free_scratch:
1399 	kho_out.fdt = NULL;
1400 	for (int i = 0; i < kho_scratch_cnt; i++) {
1401 		void *start = __va(kho_scratch[i].addr);
1402 		void *end = start + kho_scratch[i].size;
1403 
1404 		free_reserved_area(start, end, -1, "");
1405 	}
1406 	kho_enable = false;
1407 	return err;
1408 }
1409 fs_initcall(kho_init);
1410 
1411 static void __init kho_release_scratch(void)
1412 {
1413 	phys_addr_t start, end;
1414 	u64 i;
1415 
1416 	memmap_init_kho_scratch_pages();
1417 
1418 	/*
1419 	 * Mark scratch mem as CMA before we return it. That way we
1420 	 * ensure that no kernel allocations happen on it. That means
1421 	 * we can reuse it as scratch memory again later.
1422 	 */
1423 	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
1424 			     MEMBLOCK_KHO_SCRATCH, &start, &end, NULL) {
1425 		ulong start_pfn = pageblock_start_pfn(PFN_DOWN(start));
1426 		ulong end_pfn = pageblock_align(PFN_UP(end));
1427 		ulong pfn;
1428 
1429 		for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages)
1430 			init_pageblock_migratetype(pfn_to_page(pfn),
1431 						   MIGRATE_CMA, false);
1432 	}
1433 }
1434 
1435 void __init kho_memory_init(void)
1436 {
1437 	if (kho_in.scratch_phys) {
1438 		kho_scratch = phys_to_virt(kho_in.scratch_phys);
1439 		kho_release_scratch();
1440 
1441 		if (!kho_mem_deserialize(kho_get_fdt()))
1442 			kho_in.fdt_phys = 0;
1443 	} else {
1444 		kho_reserve_scratch();
1445 	}
1446 }
1447 
1448 void __init kho_populate(phys_addr_t fdt_phys, u64 fdt_len,
1449 			 phys_addr_t scratch_phys, u64 scratch_len)
1450 {
1451 	void *fdt = NULL;
1452 	struct kho_scratch *scratch = NULL;
1453 	int err = 0;
1454 	unsigned int scratch_cnt = scratch_len / sizeof(*kho_scratch);
1455 
1456 	/* Validate the input FDT */
1457 	fdt = early_memremap(fdt_phys, fdt_len);
1458 	if (!fdt) {
1459 		pr_warn("setup: failed to memremap FDT (0x%llx)\n", fdt_phys);
1460 		err = -EFAULT;
1461 		goto out;
1462 	}
1463 	err = fdt_check_header(fdt);
1464 	if (err) {
1465 		pr_warn("setup: handover FDT (0x%llx) is invalid: %d\n",
1466 			fdt_phys, err);
1467 		err = -EINVAL;
1468 		goto out;
1469 	}
1470 	err = fdt_node_check_compatible(fdt, 0, KHO_FDT_COMPATIBLE);
1471 	if (err) {
1472 		pr_warn("setup: handover FDT (0x%llx) is incompatible with '%s': %d\n",
1473 			fdt_phys, KHO_FDT_COMPATIBLE, err);
1474 		err = -EINVAL;
1475 		goto out;
1476 	}
1477 
1478 	scratch = early_memremap(scratch_phys, scratch_len);
1479 	if (!scratch) {
1480 		pr_warn("setup: failed to memremap scratch (phys=0x%llx, len=%lld)\n",
1481 			scratch_phys, scratch_len);
1482 		err = -EFAULT;
1483 		goto out;
1484 	}
1485 
1486 	/*
1487 	 * The previous kernel passed us safe contiguous blocks of memory to use
1488 	 * for early boot purposes so that we can resize the memblock array as
1489 	 * needed.
1490 	 */
1491 	for (int i = 0; i < scratch_cnt; i++) {
1492 		struct kho_scratch *area = &scratch[i];
1493 		u64 size = area->size;
1494 
1495 		memblock_add(area->addr, size);
1496 		err = memblock_mark_kho_scratch(area->addr, size);
1497 		if (WARN_ON(err)) {
1498 			pr_warn("failed to mark the scratch region 0x%pa+0x%pa: %pe",
1499 				&area->addr, &size, ERR_PTR(err));
1500 			goto out;
1501 		}
1502 		pr_debug("Marked 0x%pa+0x%pa as scratch", &area->addr, &size);
1503 	}
1504 
1505 	memblock_reserve(scratch_phys, scratch_len);
1506 
1507 	/*
1508 	 * Now that we have a viable region of scratch memory, let's tell
1509 	 * the memblock allocator to only use that for any allocations.
1510 	 * That way we ensure that nothing scribbles over in-use data while
1511 	 * we initialize the page tables, which we will need to ingest all
1512 	 * memory reservations from the previous kernel.
1513 	 */
1514 	memblock_set_kho_scratch_only();
1515 
1516 	kho_in.fdt_phys = fdt_phys;
1517 	kho_in.scratch_phys = scratch_phys;
1518 	kho_scratch_cnt = scratch_cnt;
1519 	pr_info("found kexec handover data.\n");
1520 
1521 out:
1522 	if (fdt)
1523 		early_memunmap(fdt, fdt_len);
1524 	if (scratch)
1525 		early_memunmap(scratch, scratch_len);
1526 	if (err)
1527 		pr_warn("disabling KHO revival: %d\n", err);
1528 }
1529 
1530 /* Helper functions for kexec_file_load */
1531 
1532 int kho_fill_kimage(struct kimage *image)
1533 {
1534 	ssize_t scratch_size;
1535 	int err = 0;
1536 	struct kexec_buf scratch;
1537 
1538 	if (!kho_enable)
1539 		return 0;
1540 
1541 	image->kho.fdt = virt_to_phys(kho_out.fdt);
1542 
1543 	scratch_size = sizeof(*kho_scratch) * kho_scratch_cnt;
1544 	scratch = (struct kexec_buf){
1545 		.image = image,
1546 		.buffer = kho_scratch,
1547 		.bufsz = scratch_size,
1548 		.mem = KEXEC_BUF_MEM_UNKNOWN,
1549 		.memsz = scratch_size,
1550 		.buf_align = SZ_64K, /* Makes it easier to map */
1551 		.buf_max = ULONG_MAX,
1552 		.top_down = true,
1553 	};
1554 	err = kexec_add_buffer(&scratch);
1555 	if (err)
1556 		return err;
1557 	image->kho.scratch = &image->segment[image->nr_segments - 1];
1558 
1559 	return 0;
1560 }
1561 
1562 static int kho_walk_scratch(struct kexec_buf *kbuf,
1563 			    int (*func)(struct resource *, void *))
1564 {
1565 	int ret = 0;
1566 	int i;
1567 
1568 	for (i = 0; i < kho_scratch_cnt; i++) {
1569 		struct resource res = {
1570 			.start = kho_scratch[i].addr,
1571 			.end = kho_scratch[i].addr + kho_scratch[i].size - 1,
1572 		};
1573 
1574 		/* Try to fit the kimage into our KHO scratch region */
1575 		ret = func(&res, kbuf);
1576 		if (ret)
1577 			break;
1578 	}
1579 
1580 	return ret;
1581 }
1582 
1583 int kho_locate_mem_hole(struct kexec_buf *kbuf,
1584 			int (*func)(struct resource *, void *))
1585 {
1586 	int ret;
1587 
1588 	if (!kho_enable || kbuf->image->type == KEXEC_TYPE_CRASH)
1589 		return 1;
1590 
1591 	ret = kho_walk_scratch(kbuf, func);
1592 
1593 	return ret == 1 ? 0 : -EADDRNOTAVAIL;
1594 }
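
/*
 * kho_locate_mem_hole() is intended to run before the regular memory walk in
 * the generic kexec_file_load placement code; the caller pattern is roughly
 * (a sketch, not a verbatim copy of kernel/kexec_file.c):
 *
 *	ret = kho_locate_mem_hole(kbuf, locate_mem_hole_callback);
 *	if (ret <= 0)
 *		return ret;	// placed in scratch, or a hard error
 *	// ret == 1: KHO not involved, fall back to walking system RAM
 */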
1595