1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * kexec_handover.c - kexec handover metadata processing
4  * Copyright (C) 2023 Alexander Graf <graf@amazon.com>
5  * Copyright (C) 2025 Microsoft Corporation, Mike Rapoport <rppt@kernel.org>
6  * Copyright (C) 2025 Google LLC, Changyuan Lyu <changyuanl@google.com>
7  */
8 
9 #define pr_fmt(fmt) "KHO: " fmt
10 
11 #include <linux/cleanup.h>
12 #include <linux/cma.h>
13 #include <linux/count_zeros.h>
14 #include <linux/debugfs.h>
15 #include <linux/kexec.h>
16 #include <linux/kexec_handover.h>
17 #include <linux/libfdt.h>
18 #include <linux/list.h>
19 #include <linux/memblock.h>
20 #include <linux/notifier.h>
21 #include <linux/page-isolation.h>
22 #include <linux/vmalloc.h>
23 
24 #include <asm/early_ioremap.h>
25 
26 #include "kexec_handover_internal.h"
27 /*
28  * KHO is tightly coupled with mm init and needs access to some of mm
29  * internal APIs.
30  */
31 #include "../mm/internal.h"
32 #include "kexec_internal.h"
33 
34 #define KHO_FDT_COMPATIBLE "kho-v1"
35 #define PROP_PRESERVED_MEMORY_MAP "preserved-memory-map"
36 #define PROP_SUB_FDT "fdt"
37 
38 #define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */
39 
40 /*
41  * KHO uses page->private, which is an unsigned long, to store page metadata.
42  * Use it to store both the magic and the order.
43  */
44 union kho_page_info {
45 	unsigned long page_private;
46 	struct {
47 		unsigned int order;
48 		unsigned int magic;
49 	};
50 };
51 
52 static_assert(sizeof(union kho_page_info) == sizeof(((struct page *)0)->private));
53 
54 static bool kho_enable __ro_after_init;
55 
56 bool kho_is_enabled(void)
57 {
58 	return kho_enable;
59 }
60 EXPORT_SYMBOL_GPL(kho_is_enabled);
61 
62 static int __init kho_parse_enable(char *p)
63 {
64 	return kstrtobool(p, &kho_enable);
65 }
66 early_param("kho", kho_parse_enable);
67 
68 /*
69  * Keep track of memory that is to be preserved across KHO.
70  *
71  * The serializing side uses two levels of xarrays to manage chunks of per-order
72  * PAGE_SIZE byte bitmaps. For instance if PAGE_SIZE = 4096, the entire 1G order
73  * of an 8TB system would fit inside a single 4096 byte bitmap. For order 0
74  * allocations each bitmap will cover 128M of address space. Thus, for 16G of
75  * memory at most 512K of bitmap memory will be needed for order 0.
76  *
77  * This approach is fully incremental: as the serialization progresses, folios
78  * can continue to be aggregated into the tracker. The final step, immediately
79  * prior to kexec, serializes the xarray information into a linked list for the
80  * successor kernel to parse.
81  */
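
/*
 * Illustrative sketch (not compiled): where the bit for one preserved folio
 * ends up in the two-level structure described above, assuming hypothetical
 * values on a 4K-page system.
 *
 *	// order-9 folio at phys 0x40000000: pfn = 0x40000, pfn_high = 0x200
 *	// track points at kho_out.ser.track
 *	struct kho_mem_phys *physxa = xa_load(&track->orders, 9);
 *	struct kho_mem_phys_bits *bits =
 *		xa_load(&physxa->phys_bits, 0x200 / PRESERVE_BITS);
 *	set_bit(0x200 % PRESERVE_BITS, bits->preserve);
 */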
82 
83 #define PRESERVE_BITS (PAGE_SIZE * 8)
84 
85 struct kho_mem_phys_bits {
86 	DECLARE_BITMAP(preserve, PRESERVE_BITS);
87 };
88 
89 static_assert(sizeof(struct kho_mem_phys_bits) == PAGE_SIZE);
90 
91 struct kho_mem_phys {
92 	/*
93 	 * Points to kho_mem_phys_bits, a sparse bitmap array. Each bit is sized
94 	 * to order.
95 	 */
96 	struct xarray phys_bits;
97 };
98 
99 struct kho_mem_track {
100 	/* Points to kho_mem_phys, each order gets its own bitmap tree */
101 	struct xarray orders;
102 };
103 
104 struct khoser_mem_chunk;
105 
106 struct kho_serialization {
107 	struct page *fdt;
108 	struct list_head fdt_list;
109 	struct dentry *sub_fdt_dir;
110 	struct kho_mem_track track;
111 	/* First chunk of serialized preserved memory map */
112 	struct khoser_mem_chunk *preserved_mem_map;
113 };
114 
115 struct kho_out {
116 	struct blocking_notifier_head chain_head;
117 
118 	struct dentry *dir;
119 
120 	struct mutex lock; /* protects KHO FDT finalization */
121 
122 	struct kho_serialization ser;
123 	bool finalized;
124 };
125 
126 static struct kho_out kho_out = {
127 	.chain_head = BLOCKING_NOTIFIER_INIT(kho_out.chain_head),
128 	.lock = __MUTEX_INITIALIZER(kho_out.lock),
129 	.ser = {
130 		.fdt_list = LIST_HEAD_INIT(kho_out.ser.fdt_list),
131 		.track = {
132 			.orders = XARRAY_INIT(kho_out.ser.track.orders, 0),
133 		},
134 	},
135 	.finalized = false,
136 };
137 
138 static void *xa_load_or_alloc(struct xarray *xa, unsigned long index)
139 {
140 	void *res = xa_load(xa, index);
141 
142 	if (res)
143 		return res;
144 
145 	void *elm __free(free_page) = (void *)get_zeroed_page(GFP_KERNEL);
146 
147 	if (!elm)
148 		return ERR_PTR(-ENOMEM);
149 
150 	if (WARN_ON(kho_scratch_overlap(virt_to_phys(elm), PAGE_SIZE)))
151 		return ERR_PTR(-EINVAL);
152 
153 	res = xa_cmpxchg(xa, index, NULL, elm, GFP_KERNEL);
154 	if (xa_is_err(res))
155 		return ERR_PTR(xa_err(res));
156 	else if (res)
157 		return res;
158 
159 	return no_free_ptr(elm);
160 }
161 
162 static void __kho_unpreserve(struct kho_mem_track *track, unsigned long pfn,
163 			     unsigned long end_pfn)
164 {
165 	struct kho_mem_phys_bits *bits;
166 	struct kho_mem_phys *physxa;
167 
168 	while (pfn < end_pfn) {
169 		const unsigned int order =
170 			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
171 		const unsigned long pfn_high = pfn >> order;
172 
173 		physxa = xa_load(&track->orders, order);
174 		if (WARN_ON_ONCE(!physxa))
175 			return;
176 
177 		bits = xa_load(&physxa->phys_bits, pfn_high / PRESERVE_BITS);
178 		if (WARN_ON_ONCE(!bits))
179 			return;
180 
181 		clear_bit(pfn_high % PRESERVE_BITS, bits->preserve);
182 
183 		pfn += 1 << order;
184 	}
185 }
186 
187 static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
188 				unsigned int order)
189 {
190 	struct kho_mem_phys_bits *bits;
191 	struct kho_mem_phys *physxa, *new_physxa;
192 	const unsigned long pfn_high = pfn >> order;
193 
194 	might_sleep();
195 
196 	if (kho_out.finalized)
197 		return -EBUSY;
198 
199 	physxa = xa_load(&track->orders, order);
200 	if (!physxa) {
201 		int err;
202 
203 		new_physxa = kzalloc(sizeof(*physxa), GFP_KERNEL);
204 		if (!new_physxa)
205 			return -ENOMEM;
206 
207 		xa_init(&new_physxa->phys_bits);
208 		physxa = xa_cmpxchg(&track->orders, order, NULL, new_physxa,
209 				    GFP_KERNEL);
210 
211 		err = xa_err(physxa);
212 		if (err || physxa) {
213 			xa_destroy(&new_physxa->phys_bits);
214 			kfree(new_physxa);
215 
216 			if (err)
217 				return err;
218 		} else {
219 			physxa = new_physxa;
220 		}
221 	}
222 
223 	bits = xa_load_or_alloc(&physxa->phys_bits, pfn_high / PRESERVE_BITS);
224 	if (IS_ERR(bits))
225 		return PTR_ERR(bits);
226 
227 	set_bit(pfn_high % PRESERVE_BITS, bits->preserve);
228 
229 	return 0;
230 }
231 
232 static struct page *kho_restore_page(phys_addr_t phys)
233 {
234 	struct page *page = pfn_to_online_page(PHYS_PFN(phys));
235 	union kho_page_info info;
236 	unsigned int nr_pages;
237 
238 	if (!page)
239 		return NULL;
240 
241 	info.page_private = page->private;
242 	/*
243 	 * deserialize_bitmap() only sets the magic on the head page. This magic
244 	 * check also implicitly makes sure phys is order-aligned since for
245 	 * non-order-aligned phys addresses, magic will never be set.
246 	 */
247 	if (WARN_ON_ONCE(info.magic != KHO_PAGE_MAGIC || info.order > MAX_PAGE_ORDER))
248 		return NULL;
249 	nr_pages = (1 << info.order);
250 
251 	/* Clear private to make sure later restores on this page error out. */
252 	page->private = 0;
253 	/* Head page gets refcount of 1. */
254 	set_page_count(page, 1);
255 
256 	/* For higher order folios, tail pages get a page count of zero. */
257 	for (unsigned int i = 1; i < nr_pages; i++)
258 		set_page_count(page + i, 0);
259 
260 	if (info.order > 0)
261 		prep_compound_page(page, info.order);
262 
263 	adjust_managed_page_count(page, nr_pages);
264 	return page;
265 }
266 
267 /**
268  * kho_restore_folio - recreates the folio from the preserved memory.
269  * @phys: physical address of the folio.
270  *
271  * Return: pointer to the struct folio on success, NULL on failure.
272  */
273 struct folio *kho_restore_folio(phys_addr_t phys)
274 {
275 	struct page *page = kho_restore_page(phys);
276 
277 	return page ? page_folio(page) : NULL;
278 }
279 EXPORT_SYMBOL_GPL(kho_restore_folio);
280 
281 /**
282  * kho_restore_pages - restore a list of contiguous order 0 pages.
283  * @phys: physical address of the first page.
284  * @nr_pages: number of pages.
285  *
286  * Restore a contiguous list of order 0 pages that was preserved with
287  * kho_preserve_pages().
288  *
289  * Return: 0 on success, error code on failure
290  */
291 struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages)
292 {
293 	const unsigned long start_pfn = PHYS_PFN(phys);
294 	const unsigned long end_pfn = start_pfn + nr_pages;
295 	unsigned long pfn = start_pfn;
296 
297 	while (pfn < end_pfn) {
298 		const unsigned int order =
299 			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
300 		struct page *page = kho_restore_page(PFN_PHYS(pfn));
301 
302 		if (!page)
303 			return NULL;
304 		split_page(page, order);
305 		pfn += 1 << order;
306 	}
307 
308 	return pfn_to_page(start_pfn);
309 }
310 EXPORT_SYMBOL_GPL(kho_restore_pages);
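
/*
 * Illustrative round trip for a hypothetical caller (error handling elided):
 * order-0 pages preserved before kexec are restored by physical address in
 * the successor kernel, with phys recovered from that caller's own serialized
 * state (e.g. its sub-FDT).
 *
 *	// old kernel
 *	err = kho_preserve_pages(first, nr);
 *
 *	// new kernel
 *	struct page *first = kho_restore_pages(phys, nr);
 *	if (!first)
 *		return -ENOMEM;
 */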
311 
312 /* Serialize and deserialize struct kho_mem_phys across kexec
313  *
314  * Record all the bitmaps in a linked list of pages for the next kernel to
315  * process. Each chunk holds bitmaps of the same order and each block of bitmaps
316  * starts at a given physical address. This allows the bitmaps to be sparse. The
317  * xarray is used to store them in a tree while building up the data structure,
318  * but the KHO successor kernel only needs to process them once in order.
319  *
320  * All of this memory is ordinary kernel memory and is not marked for
321  * preservation. The successor kernel will remain isolated to the scratch space
322  * until it completes processing this list. Once processed, all the memory
323  * storing these ranges will be marked as free.
324  */
325 
326 struct khoser_mem_bitmap_ptr {
327 	phys_addr_t phys_start;
328 	DECLARE_KHOSER_PTR(bitmap, struct kho_mem_phys_bits *);
329 };
330 
331 struct khoser_mem_chunk_hdr {
332 	DECLARE_KHOSER_PTR(next, struct khoser_mem_chunk *);
333 	unsigned int order;
334 	unsigned int num_elms;
335 };
336 
337 #define KHOSER_BITMAP_SIZE                                   \
338 	((PAGE_SIZE - sizeof(struct khoser_mem_chunk_hdr)) / \
339 	 sizeof(struct khoser_mem_bitmap_ptr))
340 
341 struct khoser_mem_chunk {
342 	struct khoser_mem_chunk_hdr hdr;
343 	struct khoser_mem_bitmap_ptr bitmaps[KHOSER_BITMAP_SIZE];
344 };
345 
346 static_assert(sizeof(struct khoser_mem_chunk) == PAGE_SIZE);
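
/*
 * Worked example of the chunk geometry, assuming PAGE_SIZE == 4096 and 8-byte
 * phys_addr_t/KHOSER pointers: the header is 16 bytes, each bitmap descriptor
 * is 16 bytes, so KHOSER_BITMAP_SIZE == (4096 - 16) / 16 == 255 and the chunk
 * is exactly one page (16 + 255 * 16 == 4096), as the static_assert verifies.
 */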
347 
348 static struct khoser_mem_chunk *new_chunk(struct khoser_mem_chunk *cur_chunk,
349 					  unsigned long order)
350 {
351 	struct khoser_mem_chunk *chunk __free(free_page) = NULL;
352 
353 	chunk = (void *)get_zeroed_page(GFP_KERNEL);
354 	if (!chunk)
355 		return ERR_PTR(-ENOMEM);
356 
357 	if (WARN_ON(kho_scratch_overlap(virt_to_phys(chunk), PAGE_SIZE)))
358 		return ERR_PTR(-EINVAL);
359 
360 	chunk->hdr.order = order;
361 	if (cur_chunk)
362 		KHOSER_STORE_PTR(cur_chunk->hdr.next, chunk);
363 	return no_free_ptr(chunk);
364 }
365 
366 static void kho_mem_ser_free(struct khoser_mem_chunk *first_chunk)
367 {
368 	struct khoser_mem_chunk *chunk = first_chunk;
369 
370 	while (chunk) {
371 		struct khoser_mem_chunk *tmp = chunk;
372 
373 		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
374 		free_page((unsigned long)tmp);
375 	}
376 }
377 
378 static int kho_mem_serialize(struct kho_serialization *ser)
379 {
380 	struct khoser_mem_chunk *first_chunk = NULL;
381 	struct khoser_mem_chunk *chunk = NULL;
382 	struct kho_mem_phys *physxa;
383 	unsigned long order;
384 	int err = -ENOMEM;
385 
386 	xa_for_each(&ser->track.orders, order, physxa) {
387 		struct kho_mem_phys_bits *bits;
388 		unsigned long phys;
389 
390 		chunk = new_chunk(chunk, order);
391 		if (IS_ERR(chunk)) {
392 			err = PTR_ERR(chunk);
393 			goto err_free;
394 		}
395 
396 		if (!first_chunk)
397 			first_chunk = chunk;
398 
399 		xa_for_each(&physxa->phys_bits, phys, bits) {
400 			struct khoser_mem_bitmap_ptr *elm;
401 
402 			if (chunk->hdr.num_elms == ARRAY_SIZE(chunk->bitmaps)) {
403 				chunk = new_chunk(chunk, order);
404 				if (IS_ERR(chunk)) {
405 					err = PTR_ERR(chunk);
406 					goto err_free;
407 				}
408 			}
409 
410 			elm = &chunk->bitmaps[chunk->hdr.num_elms];
411 			chunk->hdr.num_elms++;
412 			elm->phys_start = (phys * PRESERVE_BITS)
413 					  << (order + PAGE_SHIFT);
414 			KHOSER_STORE_PTR(elm->bitmap, bits);
415 		}
416 	}
417 
418 	ser->preserved_mem_map = first_chunk;
419 
420 	return 0;
421 
422 err_free:
423 	kho_mem_ser_free(first_chunk);
424 	return err;
425 }
426 
427 static void __init deserialize_bitmap(unsigned int order,
428 				      struct khoser_mem_bitmap_ptr *elm)
429 {
430 	struct kho_mem_phys_bits *bitmap = KHOSER_LOAD_PTR(elm->bitmap);
431 	unsigned long bit;
432 
433 	for_each_set_bit(bit, bitmap->preserve, PRESERVE_BITS) {
434 		int sz = 1 << (order + PAGE_SHIFT);
435 		phys_addr_t phys =
436 			elm->phys_start + (bit << (order + PAGE_SHIFT));
437 		struct page *page = phys_to_page(phys);
438 		union kho_page_info info;
439 
440 		memblock_reserve(phys, sz);
441 		memblock_reserved_mark_noinit(phys, sz);
442 		info.magic = KHO_PAGE_MAGIC;
443 		info.order = order;
444 		page->private = info.page_private;
445 	}
446 }
447 
448 static void __init kho_mem_deserialize(const void *fdt)
449 {
450 	struct khoser_mem_chunk *chunk;
451 	const phys_addr_t *mem;
452 	int len;
453 
454 	mem = fdt_getprop(fdt, 0, PROP_PRESERVED_MEMORY_MAP, &len);
455 
456 	if (!mem || len != sizeof(*mem)) {
457 		pr_err("failed to get preserved memory bitmaps\n");
458 		return;
459 	}
460 
461 	chunk = *mem ? phys_to_virt(*mem) : NULL;
462 	while (chunk) {
463 		unsigned int i;
464 
465 		for (i = 0; i != chunk->hdr.num_elms; i++)
466 			deserialize_bitmap(chunk->hdr.order,
467 					   &chunk->bitmaps[i]);
468 		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
469 	}
470 }
471 
472 /*
473  * With KHO enabled, memory can become fragmented because KHO regions may
474  * be anywhere in physical address space. The scratch regions give us
475  * safe zones that will never see KHO allocations. This is where we can
476  * later safely load new kexec images and then use the scratch area for
477  * early allocations that happen before the page allocator is
478  * initialized.
479  */
480 struct kho_scratch *kho_scratch;
481 unsigned int kho_scratch_cnt;
482 
483 /*
484  * The scratch areas are scaled by default as a percentage of memory allocated
485  * from memblock. A user can override the scale with the command line parameter:
486  *
487  * kho_scratch=N%
488  *
489  * It is also possible to explicitly define sizes for the lowmem, global and
490  * per-node scratch areas:
491  *
492  * kho_scratch=l[KMG],n[KMG],m[KMG]
493  *
494  * The explicit size definition takes precedence over scale definition.
495  */
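
/*
 * Illustrative command lines (values are examples only):
 *
 *	kho_scratch=150%		scale all scratch areas to 150% of
 *					memblock-reserved kernel memory
 *	kho_scratch=256M,2G,512M	256M lowmem, 2G global, 512M per node
 */
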
496 static unsigned int scratch_scale __initdata = 200;
497 static phys_addr_t scratch_size_global __initdata;
498 static phys_addr_t scratch_size_pernode __initdata;
499 static phys_addr_t scratch_size_lowmem __initdata;
500 
501 static int __init kho_parse_scratch_size(char *p)
502 {
503 	size_t len;
504 	unsigned long sizes[3];
505 	size_t total_size = 0;
506 	int i;
507 
508 	if (!p)
509 		return -EINVAL;
510 
511 	len = strlen(p);
512 	if (!len)
513 		return -EINVAL;
514 
515 	/* parse nn% */
516 	if (p[len - 1] == '%') {
517 		/* unsigned int max is 4,294,967,295, 10 chars */
518 		char s_scale[11] = {};
519 		int ret = 0;
520 
521 		if (len > ARRAY_SIZE(s_scale))
522 			return -EINVAL;
523 
524 		memcpy(s_scale, p, len - 1);
525 		ret = kstrtouint(s_scale, 10, &scratch_scale);
526 		if (!ret)
527 			pr_notice("scratch scale is %d%%\n", scratch_scale);
528 		return ret;
529 	}
530 
531 	/* parse ll[KMG],mm[KMG],nn[KMG] */
532 	for (i = 0; i < ARRAY_SIZE(sizes); i++) {
533 		char *endp = p;
534 
535 		if (i > 0) {
536 			if (*p != ',')
537 				return -EINVAL;
538 			p += 1;
539 		}
540 
541 		sizes[i] = memparse(p, &endp);
542 		if (endp == p)
543 			return -EINVAL;
544 		p = endp;
545 		total_size += sizes[i];
546 	}
547 
548 	if (!total_size)
549 		return -EINVAL;
550 
551 	/* The string should be fully consumed by now. */
552 	if (*p)
553 		return -EINVAL;
554 
555 	scratch_size_lowmem = sizes[0];
556 	scratch_size_global = sizes[1];
557 	scratch_size_pernode = sizes[2];
558 	scratch_scale = 0;
559 
560 	pr_notice("scratch areas: lowmem: %lluMiB global: %lluMiB pernode: %lluMiB\n",
561 		  (u64)(scratch_size_lowmem >> 20),
562 		  (u64)(scratch_size_global >> 20),
563 		  (u64)(scratch_size_pernode >> 20));
564 
565 	return 0;
566 }
567 early_param("kho_scratch", kho_parse_scratch_size);
568 
569 static void __init scratch_size_update(void)
570 {
571 	phys_addr_t size;
572 
573 	if (!scratch_scale)
574 		return;
575 
576 	size = memblock_reserved_kern_size(ARCH_LOW_ADDRESS_LIMIT,
577 					   NUMA_NO_NODE);
578 	size = size * scratch_scale / 100;
579 	scratch_size_lowmem = round_up(size, CMA_MIN_ALIGNMENT_BYTES);
580 
581 	size = memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE,
582 					   NUMA_NO_NODE);
583 	size = size * scratch_scale / 100 - scratch_size_lowmem;
584 	scratch_size_global = round_up(size, CMA_MIN_ALIGNMENT_BYTES);
585 }
586 
587 static phys_addr_t __init scratch_size_node(int nid)
588 {
589 	phys_addr_t size;
590 
591 	if (scratch_scale) {
592 		size = memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE,
593 						   nid);
594 		size = size * scratch_scale / 100;
595 	} else {
596 		size = scratch_size_pernode;
597 	}
598 
599 	return round_up(size, CMA_MIN_ALIGNMENT_BYTES);
600 }
601 
602 /**
603  * kho_reserve_scratch - Reserve a contiguous chunk of memory for kexec
604  *
605  * With KHO we can preserve arbitrary pages in the system. To ensure we still
606  * have a large contiguous region of memory when we search the physical address
607  * space for target memory, let's make sure we always have a large CMA region
608  * active. This CMA region will only be used for movable pages which are not a
609  * problem for us during KHO because we can just move them somewhere else.
610  */
611 static void __init kho_reserve_scratch(void)
612 {
613 	phys_addr_t addr, size;
614 	int nid, i = 0;
615 
616 	if (!kho_enable)
617 		return;
618 
619 	scratch_size_update();
620 
621 	/* FIXME: deal with node hot-plug/remove */
622 	kho_scratch_cnt = num_online_nodes() + 2;
623 	size = kho_scratch_cnt * sizeof(*kho_scratch);
624 	kho_scratch = memblock_alloc(size, PAGE_SIZE);
625 	if (!kho_scratch)
626 		goto err_disable_kho;
627 
628 	/*
629 	 * reserve scratch area in low memory for lowmem allocations in the
630 	 * next kernel
631 	 */
632 	size = scratch_size_lowmem;
633 	addr = memblock_phys_alloc_range(size, CMA_MIN_ALIGNMENT_BYTES, 0,
634 					 ARCH_LOW_ADDRESS_LIMIT);
635 	if (!addr)
636 		goto err_free_scratch_desc;
637 
638 	kho_scratch[i].addr = addr;
639 	kho_scratch[i].size = size;
640 	i++;
641 
642 	/* reserve large contiguous area for allocations without nid */
643 	size = scratch_size_global;
644 	addr = memblock_phys_alloc(size, CMA_MIN_ALIGNMENT_BYTES);
645 	if (!addr)
646 		goto err_free_scratch_areas;
647 
648 	kho_scratch[i].addr = addr;
649 	kho_scratch[i].size = size;
650 	i++;
651 
652 	for_each_online_node(nid) {
653 		size = scratch_size_node(nid);
654 		addr = memblock_alloc_range_nid(size, CMA_MIN_ALIGNMENT_BYTES,
655 						0, MEMBLOCK_ALLOC_ACCESSIBLE,
656 						nid, true);
657 		if (!addr)
658 			goto err_free_scratch_areas;
659 
660 		kho_scratch[i].addr = addr;
661 		kho_scratch[i].size = size;
662 		i++;
663 	}
664 
665 	return;
666 
667 err_free_scratch_areas:
668 	for (i--; i >= 0; i--)
669 		memblock_phys_free(kho_scratch[i].addr, kho_scratch[i].size);
670 err_free_scratch_desc:
671 	memblock_free(kho_scratch, kho_scratch_cnt * sizeof(*kho_scratch));
672 err_disable_kho:
673 	pr_warn("Failed to reserve scratch area, disabling kexec handover\n");
674 	kho_enable = false;
675 }
676 
677 struct fdt_debugfs {
678 	struct list_head list;
679 	struct debugfs_blob_wrapper wrapper;
680 	struct dentry *file;
681 };
682 
683 static int kho_debugfs_fdt_add(struct list_head *list, struct dentry *dir,
684 			       const char *name, const void *fdt)
685 {
686 	struct fdt_debugfs *f;
687 	struct dentry *file;
688 
689 	f = kmalloc(sizeof(*f), GFP_KERNEL);
690 	if (!f)
691 		return -ENOMEM;
692 
693 	f->wrapper.data = (void *)fdt;
694 	f->wrapper.size = fdt_totalsize(fdt);
695 
696 	file = debugfs_create_blob(name, 0400, dir, &f->wrapper);
697 	if (IS_ERR(file)) {
698 		kfree(f);
699 		return PTR_ERR(file);
700 	}
701 
702 	f->file = file;
703 	list_add(&f->list, list);
704 
705 	return 0;
706 }
707 
708 /**
709  * kho_add_subtree - record the physical address of a sub FDT in the KHO root tree.
710  * @ser: serialization control object passed by KHO notifiers.
711  * @name: name of the sub tree.
712  * @fdt: the sub tree blob.
713  *
714  * Creates a new child node named @name in the KHO root FDT and records
715  * the physical address of @fdt. The pages of @fdt must also be preserved
716  * by KHO for the new kernel to retrieve it after kexec.
717  *
718  * A debugfs blob entry is also created at
719  * ``/sys/kernel/debug/kho/out/sub_fdts/@name``.
720  *
721  * Return: 0 on success, error code on failure
722  */
723 int kho_add_subtree(struct kho_serialization *ser, const char *name, void *fdt)
724 {
725 	int err = 0;
726 	u64 phys = (u64)virt_to_phys(fdt);
727 	void *root = page_to_virt(ser->fdt);
728 
729 	err |= fdt_begin_node(root, name);
730 	err |= fdt_property(root, PROP_SUB_FDT, &phys, sizeof(phys));
731 	err |= fdt_end_node(root);
732 
733 	if (err)
734 		return err;
735 
736 	return kho_debugfs_fdt_add(&ser->fdt_list, ser->sub_fdt_dir, name, fdt);
737 }
738 EXPORT_SYMBOL_GPL(kho_add_subtree);
739 
740 int register_kho_notifier(struct notifier_block *nb)
741 {
742 	return blocking_notifier_chain_register(&kho_out.chain_head, nb);
743 }
744 EXPORT_SYMBOL_GPL(register_kho_notifier);
745 
746 int unregister_kho_notifier(struct notifier_block *nb)
747 {
748 	return blocking_notifier_chain_unregister(&kho_out.chain_head, nb);
749 }
750 EXPORT_SYMBOL_GPL(unregister_kho_notifier);
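
/*
 * Minimal sketch of the expected producer pattern for a hypothetical "foo"
 * subsystem (not real kernel code): register a notifier and, on
 * KEXEC_KHO_FINALIZE, hand a separately preserved sub-FDT to KHO.
 *
 *	static int foo_kho_notifier(struct notifier_block *nb,
 *				    unsigned long action, void *data)
 *	{
 *		struct kho_serialization *ser = data;
 *
 *		if (action != KEXEC_KHO_FINALIZE)
 *			return NOTIFY_DONE;
 *
 *		// foo_fdt: page-backed FDT blob, preserved by the caller
 *		return notifier_from_errno(kho_add_subtree(ser, "foo", foo_fdt));
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_kho_notifier,
 *	};
 *
 *	...
 *	register_kho_notifier(&foo_nb);
 */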
751 
752 /**
753  * kho_preserve_folio - preserve a folio across kexec.
754  * @folio: folio to preserve.
755  *
756  * Instructs KHO to preserve the whole folio across kexec. The order
757  * will be preserved as well.
758  *
759  * Return: 0 on success, error code on failure
760  */
761 int kho_preserve_folio(struct folio *folio)
762 {
763 	const unsigned long pfn = folio_pfn(folio);
764 	const unsigned int order = folio_order(folio);
765 	struct kho_mem_track *track = &kho_out.ser.track;
766 
767 	if (WARN_ON(kho_scratch_overlap(pfn << PAGE_SHIFT, PAGE_SIZE << order)))
768 		return -EINVAL;
769 
770 	return __kho_preserve_order(track, pfn, order);
771 }
772 EXPORT_SYMBOL_GPL(kho_preserve_folio);
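
/*
 * Illustrative use by a hypothetical caller: preserve a high-order folio and
 * record its physical address where the successor kernel can find it,
 * typically in that caller's sub-FDT.
 *
 *	struct folio *folio = folio_alloc(GFP_KERNEL, 4);
 *
 *	if (folio && !kho_preserve_folio(folio))
 *		// foo_record_phys() is caller-specific and hypothetical
 *		foo_record_phys(PFN_PHYS(folio_pfn(folio)));
 */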
773 
774 /**
775  * kho_preserve_pages - preserve contiguous pages across kexec
776  * @page: first page in the list.
777  * @nr_pages: number of pages.
778  *
779  * Preserve a contiguous list of order 0 pages. Must be restored using
780  * kho_restore_pages() to ensure the pages are restored properly as order 0.
781  *
782  * Return: 0 on success, error code on failure
783  */
784 int kho_preserve_pages(struct page *page, unsigned int nr_pages)
785 {
786 	struct kho_mem_track *track = &kho_out.ser.track;
787 	const unsigned long start_pfn = page_to_pfn(page);
788 	const unsigned long end_pfn = start_pfn + nr_pages;
789 	unsigned long pfn = start_pfn;
790 	unsigned long failed_pfn = 0;
791 	int err = 0;
792 
793 	if (WARN_ON(kho_scratch_overlap(start_pfn << PAGE_SHIFT,
794 					nr_pages << PAGE_SHIFT))) {
795 		return -EINVAL;
796 	}
797 
798 	while (pfn < end_pfn) {
799 		const unsigned int order =
800 			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
801 
802 		err = __kho_preserve_order(track, pfn, order);
803 		if (err) {
804 			failed_pfn = pfn;
805 			break;
806 		}
807 
808 		pfn += 1 << order;
809 	}
810 
811 	if (err)
812 		__kho_unpreserve(track, start_pfn, failed_pfn);
813 
814 	return err;
815 }
816 EXPORT_SYMBOL_GPL(kho_preserve_pages);
817 
818 struct kho_vmalloc_hdr {
819 	DECLARE_KHOSER_PTR(next, struct kho_vmalloc_chunk *);
820 };
821 
822 #define KHO_VMALLOC_SIZE				\
823 	((PAGE_SIZE - sizeof(struct kho_vmalloc_hdr)) / \
824 	 sizeof(phys_addr_t))
825 
826 struct kho_vmalloc_chunk {
827 	struct kho_vmalloc_hdr hdr;
828 	phys_addr_t phys[KHO_VMALLOC_SIZE];
829 };
830 
831 static_assert(sizeof(struct kho_vmalloc_chunk) == PAGE_SIZE);
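
/*
 * With PAGE_SIZE == 4096 and an 8-byte kho_vmalloc_hdr/phys_addr_t this gives
 * KHO_VMALLOC_SIZE == (4096 - 8) / 8 == 511 page addresses per chunk, and the
 * static_assert above keeps each chunk exactly one page.
 */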
832 
833 /* vmalloc flags KHO supports */
834 #define KHO_VMALLOC_SUPPORTED_FLAGS	(VM_ALLOC | VM_ALLOW_HUGE_VMAP)
835 
836 /* KHO internal flags for vmalloc preservations */
837 #define KHO_VMALLOC_ALLOC	0x0001
838 #define KHO_VMALLOC_HUGE_VMAP	0x0002
839 
840 static unsigned short vmalloc_flags_to_kho(unsigned int vm_flags)
841 {
842 	unsigned short kho_flags = 0;
843 
844 	if (vm_flags & VM_ALLOC)
845 		kho_flags |= KHO_VMALLOC_ALLOC;
846 	if (vm_flags & VM_ALLOW_HUGE_VMAP)
847 		kho_flags |= KHO_VMALLOC_HUGE_VMAP;
848 
849 	return kho_flags;
850 }
851 
852 static unsigned int kho_flags_to_vmalloc(unsigned short kho_flags)
853 {
854 	unsigned int vm_flags = 0;
855 
856 	if (kho_flags & KHO_VMALLOC_ALLOC)
857 		vm_flags |= VM_ALLOC;
858 	if (kho_flags & KHO_VMALLOC_HUGE_VMAP)
859 		vm_flags |= VM_ALLOW_HUGE_VMAP;
860 
861 	return vm_flags;
862 }
863 
864 static struct kho_vmalloc_chunk *new_vmalloc_chunk(struct kho_vmalloc_chunk *cur)
865 {
866 	struct kho_vmalloc_chunk *chunk;
867 	int err;
868 
869 	chunk = (struct kho_vmalloc_chunk *)get_zeroed_page(GFP_KERNEL);
870 	if (!chunk)
871 		return NULL;
872 
873 	err = kho_preserve_pages(virt_to_page(chunk), 1);
874 	if (err)
875 		goto err_free;
876 	if (cur)
877 		KHOSER_STORE_PTR(cur->hdr.next, chunk);
878 	return chunk;
879 
880 err_free:
881 	free_page((unsigned long)chunk);
882 	return NULL;
883 }
884 
885 static void kho_vmalloc_unpreserve_chunk(struct kho_vmalloc_chunk *chunk,
886 					 unsigned short order)
887 {
888 	struct kho_mem_track *track = &kho_out.ser.track;
889 	unsigned long pfn = PHYS_PFN(virt_to_phys(chunk));
890 
891 	__kho_unpreserve(track, pfn, pfn + 1);
892 
893 	for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
894 		pfn = PHYS_PFN(chunk->phys[i]);
895 		__kho_unpreserve(track, pfn, pfn + (1 << order));
896 	}
897 }
898 
899 static void kho_vmalloc_free_chunks(struct kho_vmalloc *kho_vmalloc)
900 {
901 	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(kho_vmalloc->first);
902 
903 	while (chunk) {
904 		struct kho_vmalloc_chunk *tmp = chunk;
905 
906 		kho_vmalloc_unpreserve_chunk(chunk, kho_vmalloc->order);
907 
908 		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
909 		free_page((unsigned long)tmp);
910 	}
911 }
912 
913 /**
914  * kho_preserve_vmalloc - preserve memory allocated with vmalloc() across kexec
915  * @ptr: pointer to the area in vmalloc address space
916  * @preservation: placeholder for preservation metadata
917  *
918  * Instructs KHO to preserve the area in vmalloc address space at @ptr. The
919  * physical pages mapped at @ptr will be preserved and on successful return
920  * @preservation will hold the physical address of a structure that describes
921  * the preservation.
922  *
923  * NOTE: The memory allocated with vmalloc_node() variants cannot be reliably
924  * restored on the same node
925  *
926  * Return: 0 on success, error code on failure
927  */
928 int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation)
929 {
930 	struct kho_vmalloc_chunk *chunk;
931 	struct vm_struct *vm = find_vm_area(ptr);
932 	unsigned int order, flags, nr_contig_pages;
933 	unsigned int idx = 0;
934 	int err;
935 
936 	if (!vm)
937 		return -EINVAL;
938 
939 	if (vm->flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
940 		return -EOPNOTSUPP;
941 
942 	flags = vmalloc_flags_to_kho(vm->flags);
943 	order = get_vm_area_page_order(vm);
944 
945 	chunk = new_vmalloc_chunk(NULL);
946 	if (!chunk)
947 		return -ENOMEM;
948 	KHOSER_STORE_PTR(preservation->first, chunk);
949 
950 	nr_contig_pages = (1 << order);
951 	for (int i = 0; i < vm->nr_pages; i += nr_contig_pages) {
952 		phys_addr_t phys = page_to_phys(vm->pages[i]);
953 
954 		err = kho_preserve_pages(vm->pages[i], nr_contig_pages);
955 		if (err)
956 			goto err_free;
957 
958 		chunk->phys[idx++] = phys;
959 		if (idx == ARRAY_SIZE(chunk->phys)) {
960 			chunk = new_vmalloc_chunk(chunk);
961 			if (!chunk)
962 				goto err_free;
963 			idx = 0;
964 		}
965 	}
966 
967 	preservation->total_pages = vm->nr_pages;
968 	preservation->flags = flags;
969 	preservation->order = order;
970 
971 	return 0;
972 
973 err_free:
974 	kho_vmalloc_free_chunks(preservation);
975 	return err;
976 }
977 EXPORT_SYMBOL_GPL(kho_preserve_vmalloc);
978 
979 /**
980  * kho_restore_vmalloc - recreates and populates an area in vmalloc address
981  * space from the preserved memory.
982  * @preservation: preservation metadata.
983  *
984  * Recreates an area in vmalloc address space and populates it with memory that
985  * was preserved using kho_preserve_vmalloc().
986  *
987  * Return: pointer to the area in the vmalloc address space, NULL on failure.
988  */
989 void *kho_restore_vmalloc(const struct kho_vmalloc *preservation)
990 {
991 	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first);
992 	unsigned int align, order, shift, vm_flags;
993 	unsigned long total_pages, contig_pages;
994 	unsigned long addr, size;
995 	struct vm_struct *area;
996 	struct page **pages;
997 	unsigned int idx = 0;
998 	int err;
999 
1000 	vm_flags = kho_flags_to_vmalloc(preservation->flags);
1001 	if (vm_flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
1002 		return NULL;
1003 
1004 	total_pages = preservation->total_pages;
1005 	pages = kvmalloc_array(total_pages, sizeof(*pages), GFP_KERNEL);
1006 	if (!pages)
1007 		return NULL;
1008 	order = preservation->order;
1009 	contig_pages = (1 << order);
1010 	shift = PAGE_SHIFT + order;
1011 	align = 1 << shift;
1012 
1013 	while (chunk) {
1014 		struct page *page;
1015 
1016 		for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
1017 			phys_addr_t phys = chunk->phys[i];
1018 
1019 			if (idx + contig_pages > total_pages)
1020 				goto err_free_pages_array;
1021 
1022 			page = kho_restore_pages(phys, contig_pages);
1023 			if (!page)
1024 				goto err_free_pages_array;
1025 
1026 			for (int j = 0; j < contig_pages; j++)
1027 				pages[idx++] = page;
1028 
1029 			phys += contig_pages * PAGE_SIZE;
1030 		}
1031 
1032 		page = kho_restore_pages(virt_to_phys(chunk), 1);
1033 		if (!page)
1034 			goto err_free_pages_array;
1035 		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
1036 		__free_page(page);
1037 	}
1038 
1039 	if (idx != total_pages)
1040 		goto err_free_pages_array;
1041 
1042 	area = __get_vm_area_node(total_pages * PAGE_SIZE, align, shift,
1043 				  vm_flags, VMALLOC_START, VMALLOC_END,
1044 				  NUMA_NO_NODE, GFP_KERNEL,
1045 				  __builtin_return_address(0));
1046 	if (!area)
1047 		goto err_free_pages_array;
1048 
1049 	addr = (unsigned long)area->addr;
1050 	size = get_vm_area_size(area);
1051 	err = vmap_pages_range(addr, addr + size, PAGE_KERNEL, pages, shift);
1052 	if (err)
1053 		goto err_free_vm_area;
1054 
1055 	area->nr_pages = total_pages;
1056 	area->pages = pages;
1057 
1058 	return area->addr;
1059 
1060 err_free_vm_area:
1061 	free_vm_area(area);
1062 err_free_pages_array:
1063 	kvfree(pages);
1064 	return NULL;
1065 }
1066 EXPORT_SYMBOL_GPL(kho_restore_vmalloc);
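
/*
 * Illustrative vmalloc round trip for a hypothetical caller (error handling
 * elided). The struct kho_vmalloc descriptor itself must reach the successor
 * kernel by other means, e.g. embedded in a preserved sub-FDT.
 *
 *	// old kernel
 *	struct kho_vmalloc pres;
 *	void *buf = vmalloc(SZ_1M);
 *
 *	kho_preserve_vmalloc(buf, &pres);
 *
 *	// new kernel, after copying 'pres' back out of the sub-FDT
 *	void *buf = kho_restore_vmalloc(&pres);
 */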
1067 
1068 /* Handling for debug/kho/out */
1069 
1070 static struct dentry *debugfs_root;
1071 
1072 static int kho_out_update_debugfs_fdt(void)
1073 {
1074 	int err = 0;
1075 	struct fdt_debugfs *ff, *tmp;
1076 
1077 	if (kho_out.finalized) {
1078 		err = kho_debugfs_fdt_add(&kho_out.ser.fdt_list, kho_out.dir,
1079 					  "fdt", page_to_virt(kho_out.ser.fdt));
1080 	} else {
1081 		list_for_each_entry_safe(ff, tmp, &kho_out.ser.fdt_list, list) {
1082 			debugfs_remove(ff->file);
1083 			list_del(&ff->list);
1084 			kfree(ff);
1085 		}
1086 	}
1087 
1088 	return err;
1089 }
1090 
1091 static int kho_abort(void)
1092 {
1093 	int err;
1094 	unsigned long order;
1095 	struct kho_mem_phys *physxa;
1096 
1097 	xa_for_each(&kho_out.ser.track.orders, order, physxa) {
1098 		struct kho_mem_phys_bits *bits;
1099 		unsigned long phys;
1100 
1101 		xa_for_each(&physxa->phys_bits, phys, bits)
1102 			kfree(bits);
1103 
1104 		xa_destroy(&physxa->phys_bits);
1105 		kfree(physxa);
1106 	}
1107 	xa_destroy(&kho_out.ser.track.orders);
1108 
1109 	if (kho_out.ser.preserved_mem_map) {
1110 		kho_mem_ser_free(kho_out.ser.preserved_mem_map);
1111 		kho_out.ser.preserved_mem_map = NULL;
1112 	}
1113 
1114 	err = blocking_notifier_call_chain(&kho_out.chain_head, KEXEC_KHO_ABORT,
1115 					   NULL);
1116 	err = notifier_to_errno(err);
1117 
1118 	if (err)
1119 		pr_err("Failed to abort KHO finalization: %d\n", err);
1120 
1121 	return err;
1122 }
1123 
1124 static int kho_finalize(void)
1125 {
1126 	int err = 0;
1127 	u64 *preserved_mem_map;
1128 	void *fdt = page_to_virt(kho_out.ser.fdt);
1129 
1130 	err |= fdt_create(fdt, PAGE_SIZE);
1131 	err |= fdt_finish_reservemap(fdt);
1132 	err |= fdt_begin_node(fdt, "");
1133 	err |= fdt_property_string(fdt, "compatible", KHO_FDT_COMPATIBLE);
1134 	/*
1135 	 * Reserve the preserved-memory-map property in the root FDT, so
1136 	 * that all property definitions will precede subnodes created by
1137 	 * KHO callers.
1138 	 */
1139 	err |= fdt_property_placeholder(fdt, PROP_PRESERVED_MEMORY_MAP,
1140 					sizeof(*preserved_mem_map),
1141 					(void **)&preserved_mem_map);
1142 	if (err)
1143 		goto abort;
1144 
1145 	err = kho_preserve_folio(page_folio(kho_out.ser.fdt));
1146 	if (err)
1147 		goto abort;
1148 
1149 	err = blocking_notifier_call_chain(&kho_out.chain_head,
1150 					   KEXEC_KHO_FINALIZE, &kho_out.ser);
1151 	err = notifier_to_errno(err);
1152 	if (err)
1153 		goto abort;
1154 
1155 	err = kho_mem_serialize(&kho_out.ser);
1156 	if (err)
1157 		goto abort;
1158 
1159 	*preserved_mem_map = (u64)virt_to_phys(kho_out.ser.preserved_mem_map);
1160 
1161 	err |= fdt_end_node(fdt);
1162 	err |= fdt_finish(fdt);
1163 
1164 abort:
1165 	if (err) {
1166 		pr_err("Failed to convert KHO state tree: %d\n", err);
1167 		kho_abort();
1168 	}
1169 
1170 	return err;
1171 }
1172 
1173 static int kho_out_finalize_get(void *data, u64 *val)
1174 {
1175 	mutex_lock(&kho_out.lock);
1176 	*val = kho_out.finalized;
1177 	mutex_unlock(&kho_out.lock);
1178 
1179 	return 0;
1180 }
1181 
1182 static int kho_out_finalize_set(void *data, u64 _val)
1183 {
1184 	int ret = 0;
1185 	bool val = !!_val;
1186 
1187 	mutex_lock(&kho_out.lock);
1188 
1189 	if (val == kho_out.finalized) {
1190 		if (kho_out.finalized)
1191 			ret = -EEXIST;
1192 		else
1193 			ret = -ENOENT;
1194 		goto unlock;
1195 	}
1196 
1197 	if (val)
1198 		ret = kho_finalize();
1199 	else
1200 		ret = kho_abort();
1201 
1202 	if (ret)
1203 		goto unlock;
1204 
1205 	kho_out.finalized = val;
1206 	ret = kho_out_update_debugfs_fdt();
1207 
1208 unlock:
1209 	mutex_unlock(&kho_out.lock);
1210 	return ret;
1211 }
1212 
1213 DEFINE_DEBUGFS_ATTRIBUTE(fops_kho_out_finalize, kho_out_finalize_get,
1214 			 kho_out_finalize_set, "%llu\n");
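
/*
 * Typical userspace sequence (illustrative; any kexec_file_load()-based
 * loader works): finalize KHO state first so kho_fill_kimage() passes the
 * FDT and scratch areas along, then load and execute the target kernel.
 *
 *	echo 1 > /sys/kernel/debug/kho/out/finalize
 *	kexec -s -l /boot/vmlinuz --initrd=/boot/initrd.img --reuse-cmdline
 *	kexec -e
 */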
1215 
1216 static int scratch_phys_show(struct seq_file *m, void *v)
1217 {
1218 	for (int i = 0; i < kho_scratch_cnt; i++)
1219 		seq_printf(m, "0x%llx\n", kho_scratch[i].addr);
1220 
1221 	return 0;
1222 }
1223 DEFINE_SHOW_ATTRIBUTE(scratch_phys);
1224 
1225 static int scratch_len_show(struct seq_file *m, void *v)
1226 {
1227 	for (int i = 0; i < kho_scratch_cnt; i++)
1228 		seq_printf(m, "0x%llx\n", kho_scratch[i].size);
1229 
1230 	return 0;
1231 }
1232 DEFINE_SHOW_ATTRIBUTE(scratch_len);
1233 
1234 static __init int kho_out_debugfs_init(void)
1235 {
1236 	struct dentry *dir, *f, *sub_fdt_dir;
1237 
1238 	dir = debugfs_create_dir("out", debugfs_root);
1239 	if (IS_ERR(dir))
1240 		return -ENOMEM;
1241 
1242 	sub_fdt_dir = debugfs_create_dir("sub_fdts", dir);
1243 	if (IS_ERR(sub_fdt_dir))
1244 		goto err_rmdir;
1245 
1246 	f = debugfs_create_file("scratch_phys", 0400, dir, NULL,
1247 				&scratch_phys_fops);
1248 	if (IS_ERR(f))
1249 		goto err_rmdir;
1250 
1251 	f = debugfs_create_file("scratch_len", 0400, dir, NULL,
1252 				&scratch_len_fops);
1253 	if (IS_ERR(f))
1254 		goto err_rmdir;
1255 
1256 	f = debugfs_create_file("finalize", 0600, dir, NULL,
1257 				&fops_kho_out_finalize);
1258 	if (IS_ERR(f))
1259 		goto err_rmdir;
1260 
1261 	kho_out.dir = dir;
1262 	kho_out.ser.sub_fdt_dir = sub_fdt_dir;
1263 	return 0;
1264 
1265 err_rmdir:
1266 	debugfs_remove_recursive(dir);
1267 	return -ENOENT;
1268 }
1269 
1270 struct kho_in {
1271 	struct dentry *dir;
1272 	phys_addr_t fdt_phys;
1273 	phys_addr_t scratch_phys;
1274 	struct list_head fdt_list;
1275 };
1276 
1277 static struct kho_in kho_in = {
1278 	.fdt_list = LIST_HEAD_INIT(kho_in.fdt_list),
1279 };
1280 
1281 static const void *kho_get_fdt(void)
1282 {
1283 	return kho_in.fdt_phys ? phys_to_virt(kho_in.fdt_phys) : NULL;
1284 }
1285 
1286 /**
1287  * is_kho_boot - check if current kernel was booted via KHO-enabled
1288  * kexec
1289  *
1290  * This function checks if the current kernel was loaded through a kexec
1291  * operation with KHO enabled, by verifying that a valid KHO FDT
1292  * was passed.
1293  *
1294  * Note: This function returns reliable results only after
1295  * kho_populate() has been called during early boot. Before that,
1296  * it may return false even if KHO data is present.
1297  *
1298  * Return: true if booted via KHO-enabled kexec, false otherwise
1299  */
1300 bool is_kho_boot(void)
1301 {
1302 	return !!kho_get_fdt();
1303 }
1304 EXPORT_SYMBOL_GPL(is_kho_boot);
1305 
1306 /**
1307  * kho_retrieve_subtree - retrieve a preserved sub FDT by its name.
1308  * @name: the name of the sub FDT passed to kho_add_subtree().
1309  * @phys: if found, the physical address of the sub FDT is stored in @phys.
1310  *
1311  * Retrieve a preserved sub FDT named @name and store its physical
1312  * address in @phys.
1313  *
1314  * Return: 0 on success, error code on failure
1315  */
1316 int kho_retrieve_subtree(const char *name, phys_addr_t *phys)
1317 {
1318 	const void *fdt = kho_get_fdt();
1319 	const u64 *val;
1320 	int offset, len;
1321 
1322 	if (!fdt)
1323 		return -ENOENT;
1324 
1325 	if (!phys)
1326 		return -EINVAL;
1327 
1328 	offset = fdt_subnode_offset(fdt, 0, name);
1329 	if (offset < 0)
1330 		return -ENOENT;
1331 
1332 	val = fdt_getprop(fdt, offset, PROP_SUB_FDT, &len);
1333 	if (!val || len != sizeof(*val))
1334 		return -EINVAL;
1335 
1336 	*phys = (phys_addr_t)*val;
1337 
1338 	return 0;
1339 }
1340 EXPORT_SYMBOL_GPL(kho_retrieve_subtree);
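
/*
 * Illustrative consumer in the successor kernel for a hypothetical "foo"
 * subsystem: look up the sub-FDT that was recorded via kho_add_subtree() and
 * parse properties out of it with libfdt.
 *
 *	phys_addr_t phys;
 *	const void *fdt, *prop;
 *	int len;
 *
 *	if (!kho_retrieve_subtree("foo", &phys)) {
 *		fdt = phys_to_virt(phys);
 *		prop = fdt_getprop(fdt, 0, "foo-state", &len);
 *	}
 */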
1341 
1342 /* Handling for debugfs/kho/in */
1343 
1344 static __init int kho_in_debugfs_init(const void *fdt)
1345 {
1346 	struct dentry *sub_fdt_dir;
1347 	int err, child;
1348 
1349 	kho_in.dir = debugfs_create_dir("in", debugfs_root);
1350 	if (IS_ERR(kho_in.dir))
1351 		return PTR_ERR(kho_in.dir);
1352 
1353 	sub_fdt_dir = debugfs_create_dir("sub_fdts", kho_in.dir);
1354 	if (IS_ERR(sub_fdt_dir)) {
1355 		err = PTR_ERR(sub_fdt_dir);
1356 		goto err_rmdir;
1357 	}
1358 
1359 	err = kho_debugfs_fdt_add(&kho_in.fdt_list, kho_in.dir, "fdt", fdt);
1360 	if (err)
1361 		goto err_rmdir;
1362 
1363 	fdt_for_each_subnode(child, fdt, 0) {
1364 		int len = 0;
1365 		const char *name = fdt_get_name(fdt, child, NULL);
1366 		const u64 *fdt_phys;
1367 
1368 		fdt_phys = fdt_getprop(fdt, child, "fdt", &len);
1369 		if (!fdt_phys)
1370 			continue;
1371 		if (len != sizeof(*fdt_phys)) {
1372 			pr_warn("node `%s`'s prop `fdt` has invalid length: %d\n",
1373 				name, len);
1374 			continue;
1375 		}
1376 		err = kho_debugfs_fdt_add(&kho_in.fdt_list, sub_fdt_dir, name,
1377 					  phys_to_virt(*fdt_phys));
1378 		if (err) {
1379 			pr_warn("failed to add fdt `%s` to debugfs: %d\n", name,
1380 				err);
1381 			continue;
1382 		}
1383 	}
1384 
1385 	return 0;
1386 
1387 err_rmdir:
1388 	debugfs_remove_recursive(kho_in.dir);
1389 	return err;
1390 }
1391 
1392 static __init int kho_init(void)
1393 {
1394 	int err = 0;
1395 	const void *fdt = kho_get_fdt();
1396 
1397 	if (!kho_enable)
1398 		return 0;
1399 
1400 	kho_out.ser.fdt = alloc_page(GFP_KERNEL);
1401 	if (!kho_out.ser.fdt) {
1402 		err = -ENOMEM;
1403 		goto err_free_scratch;
1404 	}
1405 
1406 	debugfs_root = debugfs_create_dir("kho", NULL);
1407 	if (IS_ERR(debugfs_root)) {
1408 		err = -ENOENT;
1409 		goto err_free_fdt;
1410 	}
1411 
1412 	err = kho_out_debugfs_init();
1413 	if (err)
1414 		goto err_free_fdt;
1415 
1416 	if (fdt) {
1417 		err = kho_in_debugfs_init(fdt);
1418 		/*
1419 		 * Failure to create /sys/kernel/debug/kho/in does not prevent
1420 		 * reviving state from KHO and setting up KHO for the next
1421 		 * kexec.
1422 		 */
1423 		if (err)
1424 			pr_err("failed exposing handover FDT in debugfs: %d\n",
1425 			       err);
1426 
1427 		return 0;
1428 	}
1429 
1430 	for (int i = 0; i < kho_scratch_cnt; i++) {
1431 		unsigned long base_pfn = PHYS_PFN(kho_scratch[i].addr);
1432 		unsigned long count = kho_scratch[i].size >> PAGE_SHIFT;
1433 		unsigned long pfn;
1434 
1435 		for (pfn = base_pfn; pfn < base_pfn + count;
1436 		     pfn += pageblock_nr_pages)
1437 			init_cma_reserved_pageblock(pfn_to_page(pfn));
1438 	}
1439 
1440 	return 0;
1441 
1442 err_free_fdt:
1443 	put_page(kho_out.ser.fdt);
1444 	kho_out.ser.fdt = NULL;
1445 err_free_scratch:
1446 	for (int i = 0; i < kho_scratch_cnt; i++) {
1447 		void *start = __va(kho_scratch[i].addr);
1448 		void *end = start + kho_scratch[i].size;
1449 
1450 		free_reserved_area(start, end, -1, "");
1451 	}
1452 	kho_enable = false;
1453 	return err;
1454 }
1455 late_initcall(kho_init);
1456 
1457 static void __init kho_release_scratch(void)
1458 {
1459 	phys_addr_t start, end;
1460 	u64 i;
1461 
1462 	memmap_init_kho_scratch_pages();
1463 
1464 	/*
1465 	 * Mark scratch mem as CMA before we return it. That way we
1466 	 * ensure that no kernel allocations happen on it. That means
1467 	 * we can reuse it as scratch memory again later.
1468 	 */
1469 	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
1470 			     MEMBLOCK_KHO_SCRATCH, &start, &end, NULL) {
1471 		ulong start_pfn = pageblock_start_pfn(PFN_DOWN(start));
1472 		ulong end_pfn = pageblock_align(PFN_UP(end));
1473 		ulong pfn;
1474 
1475 		for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages)
1476 			init_pageblock_migratetype(pfn_to_page(pfn),
1477 						   MIGRATE_CMA, false);
1478 	}
1479 }
1480 
1481 void __init kho_memory_init(void)
1482 {
1483 	struct folio *folio;
1484 
1485 	if (kho_in.scratch_phys) {
1486 		kho_scratch = phys_to_virt(kho_in.scratch_phys);
1487 		kho_release_scratch();
1488 
1489 		kho_mem_deserialize(kho_get_fdt());
1490 		folio = kho_restore_folio(kho_in.fdt_phys);
1491 		if (!folio)
1492 			pr_warn("failed to restore folio for KHO fdt\n");
1493 	} else {
1494 		kho_reserve_scratch();
1495 	}
1496 }
1497 
1498 void __init kho_populate(phys_addr_t fdt_phys, u64 fdt_len,
1499 			 phys_addr_t scratch_phys, u64 scratch_len)
1500 {
1501 	void *fdt = NULL;
1502 	struct kho_scratch *scratch = NULL;
1503 	int err = 0;
1504 	unsigned int scratch_cnt = scratch_len / sizeof(*kho_scratch);
1505 
1506 	/* Validate the input FDT */
1507 	fdt = early_memremap(fdt_phys, fdt_len);
1508 	if (!fdt) {
1509 		pr_warn("setup: failed to memremap FDT (0x%llx)\n", fdt_phys);
1510 		err = -EFAULT;
1511 		goto out;
1512 	}
1513 	err = fdt_check_header(fdt);
1514 	if (err) {
1515 		pr_warn("setup: handover FDT (0x%llx) is invalid: %d\n",
1516 			fdt_phys, err);
1517 		err = -EINVAL;
1518 		goto out;
1519 	}
1520 	err = fdt_node_check_compatible(fdt, 0, KHO_FDT_COMPATIBLE);
1521 	if (err) {
1522 		pr_warn("setup: handover FDT (0x%llx) is incompatible with '%s': %d\n",
1523 			fdt_phys, KHO_FDT_COMPATIBLE, err);
1524 		err = -EINVAL;
1525 		goto out;
1526 	}
1527 
1528 	scratch = early_memremap(scratch_phys, scratch_len);
1529 	if (!scratch) {
1530 		pr_warn("setup: failed to memremap scratch (phys=0x%llx, len=%lld)\n",
1531 			scratch_phys, scratch_len);
1532 		err = -EFAULT;
1533 		goto out;
1534 	}
1535 
1536 	/*
1537 	 * We pass safe contiguous blocks of memory to use for early boot
1538 	 * purposes from the previous kernel so that we can resize the
1539 	 * memblock array as needed.
1540 	 */
1541 	for (int i = 0; i < scratch_cnt; i++) {
1542 		struct kho_scratch *area = &scratch[i];
1543 		u64 size = area->size;
1544 
1545 		memblock_add(area->addr, size);
1546 		err = memblock_mark_kho_scratch(area->addr, size);
1547 		if (WARN_ON(err)) {
1548 			pr_warn("failed to mark the scratch region 0x%pa+0x%pa: %d\n",
1549 				&area->addr, &size, err);
1550 			goto out;
1551 		}
1552 		pr_debug("Marked 0x%pa+0x%pa as scratch\n", &area->addr, &size);
1553 	}
1554 
1555 	memblock_reserve(scratch_phys, scratch_len);
1556 
1557 	/*
1558 	 * Now that we have a viable region of scratch memory, let's tell
1559 	 * the memblock allocator to only use that for any allocations.
1560 	 * That way we ensure that nothing scribbles over in-use data while
1561 	 * we initialize the page tables, which we will need to ingest all
1562 	 * memory reservations from the previous kernel.
1563 	 */
1564 	memblock_set_kho_scratch_only();
1565 
1566 	kho_in.fdt_phys = fdt_phys;
1567 	kho_in.scratch_phys = scratch_phys;
1568 	kho_scratch_cnt = scratch_cnt;
1569 	pr_info("found kexec handover data. Will skip init for some devices\n");
1570 
1571 out:
1572 	if (fdt)
1573 		early_memunmap(fdt, fdt_len);
1574 	if (scratch)
1575 		early_memunmap(scratch, scratch_len);
1576 	if (err)
1577 		pr_warn("disabling KHO revival: %d\n", err);
1578 }
1579 
1580 /* Helper functions for kexec_file_load */
1581 
1582 int kho_fill_kimage(struct kimage *image)
1583 {
1584 	ssize_t scratch_size;
1585 	int err = 0;
1586 	struct kexec_buf scratch;
1587 
1588 	if (!kho_out.finalized)
1589 		return 0;
1590 
1591 	image->kho.fdt = page_to_phys(kho_out.ser.fdt);
1592 
1593 	scratch_size = sizeof(*kho_scratch) * kho_scratch_cnt;
1594 	scratch = (struct kexec_buf){
1595 		.image = image,
1596 		.buffer = kho_scratch,
1597 		.bufsz = scratch_size,
1598 		.mem = KEXEC_BUF_MEM_UNKNOWN,
1599 		.memsz = scratch_size,
1600 		.buf_align = SZ_64K, /* Makes it easier to map */
1601 		.buf_max = ULONG_MAX,
1602 		.top_down = true,
1603 	};
1604 	err = kexec_add_buffer(&scratch);
1605 	if (err)
1606 		return err;
1607 	image->kho.scratch = &image->segment[image->nr_segments - 1];
1608 
1609 	return 0;
1610 }
1611 
1612 static int kho_walk_scratch(struct kexec_buf *kbuf,
1613 			    int (*func)(struct resource *, void *))
1614 {
1615 	int ret = 0;
1616 	int i;
1617 
1618 	for (i = 0; i < kho_scratch_cnt; i++) {
1619 		struct resource res = {
1620 			.start = kho_scratch[i].addr,
1621 			.end = kho_scratch[i].addr + kho_scratch[i].size - 1,
1622 		};
1623 
1624 		/* Try to fit the kimage into our KHO scratch region */
1625 		ret = func(&res, kbuf);
1626 		if (ret)
1627 			break;
1628 	}
1629 
1630 	return ret;
1631 }
1632 
1633 int kho_locate_mem_hole(struct kexec_buf *kbuf,
1634 			int (*func)(struct resource *, void *))
1635 {
1636 	int ret;
1637 
1638 	if (!kho_enable || kbuf->image->type == KEXEC_TYPE_CRASH)
1639 		return 1;
1640 
1641 	ret = kho_walk_scratch(kbuf, func);
1642 
1643 	return ret == 1 ? 0 : -EADDRNOTAVAIL;
1644 }
1645