// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec_handover.c - kexec handover metadata processing
 * Copyright (C) 2023 Alexander Graf <graf@amazon.com>
 * Copyright (C) 2025 Microsoft Corporation, Mike Rapoport <rppt@kernel.org>
 * Copyright (C) 2025 Google LLC, Changyuan Lyu <changyuanl@google.com>
 * Copyright (C) 2025 Pasha Tatashin <pasha.tatashin@soleen.com>
 * Copyright (C) 2026 Google LLC, Jason Miu <jasonmiu@google.com>
 */

#define pr_fmt(fmt) "KHO: " fmt

#include <linux/cleanup.h>
#include <linux/cma.h>
#include <linux/kmemleak.h>
#include <linux/count_zeros.h>
#include <linux/kasan.h>
#include <linux/kexec.h>
#include <linux/kexec_handover.h>
#include <linux/kho_radix_tree.h>
#include <linux/utsname.h>
#include <linux/kho/abi/kexec_handover.h>
#include <linux/kho/abi/kexec_metadata.h>
#include <linux/libfdt.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/page-isolation.h>
#include <linux/unaligned.h>
#include <linux/vmalloc.h>

#include <asm/early_ioremap.h>

/*
 * KHO is tightly coupled with mm init and needs access to some of mm
 * internal APIs.
 */
#include "../../mm/internal.h"
#include "../kexec_internal.h"
#include "kexec_handover_internal.h"

/* The magic token for preserved pages */
#define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */

/*
 * KHO uses page->private, which is an unsigned long, to store page metadata.
 * Use it to store both the magic and the order.
 */
union kho_page_info {
	unsigned long page_private;
	struct {
		unsigned int order;
		unsigned int magic;
	};
};

static_assert(sizeof(union kho_page_info) == sizeof(((struct page *)0)->private));
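
/*
 * Layout sketch (assuming a 64-bit little-endian kernel): for a preserved
 * order-3 page, page->private reads back as 0x4b484f5000000003 - the high
 * 32 bits carry KHO_PAGE_MAGIC and the low 32 bits the order, so a single
 * unsigned long access recovers both fields through the union:
 *
 *	union kho_page_info info;
 *
 *	info.page_private = page->private;
 *	if (info.magic == KHO_PAGE_MAGIC)
 *		nr_pages = 1UL << info.order;
 */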

static bool kho_enable __ro_after_init = IS_ENABLED(CONFIG_KEXEC_HANDOVER_ENABLE_DEFAULT);

bool kho_is_enabled(void)
{
	return kho_enable;
}
EXPORT_SYMBOL_GPL(kho_is_enabled);

static int __init kho_parse_enable(char *p)
{
	return kstrtobool(p, &kho_enable);
}
early_param("kho", kho_parse_enable);

struct kho_out {
	void *fdt;
	struct mutex lock; /* protects KHO FDT */

	struct kho_radix_tree radix_tree;
	struct kho_debugfs dbg;
};

static struct kho_out kho_out = {
	.lock = __MUTEX_INITIALIZER(kho_out.lock),
	.radix_tree = {
		.lock = __MUTEX_INITIALIZER(kho_out.radix_tree.lock),
	},
};

/**
 * kho_radix_encode_key - Encodes a physical address and order into a radix key.
 * @phys: The physical address of the page.
 * @order: The order of the page.
 *
 * This function combines a page's physical address and its order into a
 * single unsigned long, which is used as a key for all radix tree
 * operations.
 *
 * Return: The encoded unsigned long radix key.
 */
static unsigned long kho_radix_encode_key(phys_addr_t phys, unsigned int order)
{
	/* Order bits part */
	unsigned long h = 1UL << (KHO_ORDER_0_LOG2 - order);
	/* Shifted physical address part */
	unsigned long l = phys >> (PAGE_SHIFT + order);

	return h | l;
}

/**
 * kho_radix_decode_key - Decodes a radix key back into a physical address and order.
 * @key: The unsigned long key to decode.
 * @order: An output parameter, a pointer to an unsigned int where the decoded
 * page order will be stored.
 *
 * This function reverses the encoding performed by kho_radix_encode_key(),
 * extracting the original physical address and page order from a given key.
 *
 * Return: The decoded physical address.
 */
static phys_addr_t kho_radix_decode_key(unsigned long key, unsigned int *order)
{
	unsigned int order_bit = fls64(key);
	phys_addr_t phys;

	/* order_bit is numbered starting at 1 from fls64 */
	*order = KHO_ORDER_0_LOG2 - order_bit + 1;
	/* The order is discarded by the shift */
	phys = key << (PAGE_SHIFT + *order);

	return phys;
}
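
/*
 * Worked round trip (a sketch assuming PAGE_SHIFT == 12 and
 * KHO_ORDER_0_LOG2 == 63; see <linux/kho_radix_tree.h> for the
 * authoritative constants): preserving an order-9 page at phys 0x40000000
 * yields
 *
 *	h   = 1UL << (63 - 9)		-> bit 54 set
 *	l   = 0x40000000 >> (12 + 9)	-> 0x200
 *	key = h | l
 *
 * Decoding: fls64(key) == 55, so *order = 63 - 55 + 1 = 9, and
 * key << (12 + 9) shifts the order bit off the top of the word,
 * leaving phys = 0x40000000 again.
 */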

static unsigned long kho_radix_get_bitmap_index(unsigned long key)
{
	return key % (1 << KHO_BITMAP_SIZE_LOG2);
}

static unsigned long kho_radix_get_table_index(unsigned long key,
					       unsigned int level)
{
	int s;

	s = ((level - 1) * KHO_TABLE_SIZE_LOG2) + KHO_BITMAP_SIZE_LOG2;
	return (key >> s) % (1 << KHO_TABLE_SIZE_LOG2);
}
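
/*
 * Key consumption sketch: the low KHO_BITMAP_SIZE_LOG2 bits of a key select
 * a bit inside a level-0 leaf bitmap, and each level above consumes the next
 * KHO_TABLE_SIZE_LOG2 bits as an index into that level's table of child
 * pointers. For level 2, for instance, the helper above reduces to:
 *
 *	idx = (key >> (KHO_TABLE_SIZE_LOG2 + KHO_BITMAP_SIZE_LOG2)) %
 *	      (1 << KHO_TABLE_SIZE_LOG2);
 */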

/**
 * kho_radix_add_page - Marks a page as preserved in the radix tree.
 * @tree: The KHO radix tree.
 * @pfn: The page frame number of the page to preserve.
 * @order: The order of the page.
 *
 * This function traverses the radix tree based on the key derived from @pfn
 * and @order. It sets the corresponding bit in the leaf bitmap to mark the
 * page for preservation. If intermediate nodes do not exist along the path,
 * they are allocated and added to the tree.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int kho_radix_add_page(struct kho_radix_tree *tree,
		       unsigned long pfn, unsigned int order)
{
	/* Newly allocated nodes for error cleanup */
	struct kho_radix_node *intermediate_nodes[KHO_TREE_MAX_DEPTH] = { 0 };
	unsigned long key = kho_radix_encode_key(PFN_PHYS(pfn), order);
	struct kho_radix_node *anchor_node = NULL;
	struct kho_radix_node *node = tree->root;
	struct kho_radix_node *new_node;
	unsigned int i, idx, anchor_idx;
	struct kho_radix_leaf *leaf;
	int err = 0;

	if (WARN_ON_ONCE(!tree->root))
		return -EINVAL;

	might_sleep();

	guard(mutex)(&tree->lock);

	/* Go from high levels to low levels */
	for (i = KHO_TREE_MAX_DEPTH - 1; i > 0; i--) {
		idx = kho_radix_get_table_index(key, i);

		if (node->table[idx]) {
			node = phys_to_virt(node->table[idx]);
			continue;
		}

		/* Next node is empty, create a new node for it */
		new_node = (struct kho_radix_node *)get_zeroed_page(GFP_KERNEL);
		if (!new_node) {
			err = -ENOMEM;
			goto err_free_nodes;
		}

		node->table[idx] = virt_to_phys(new_node);

		/*
		 * Capture the node where the new branch starts for cleanup
		 * if allocation fails.
		 */
		if (!anchor_node) {
			anchor_node = node;
			anchor_idx = idx;
		}
		intermediate_nodes[i] = new_node;

		node = new_node;
	}

	/* Handle the leaf level bitmap (level 0) */
	idx = kho_radix_get_bitmap_index(key);
	leaf = (struct kho_radix_leaf *)node;
	__set_bit(idx, leaf->bitmap);

	return 0;

err_free_nodes:
	for (i = KHO_TREE_MAX_DEPTH - 1; i > 0; i--) {
		if (intermediate_nodes[i])
			free_page((unsigned long)intermediate_nodes[i]);
	}
	if (anchor_node)
		anchor_node->table[anchor_idx] = 0;

	return err;
}
EXPORT_SYMBOL_GPL(kho_radix_add_page);

/**
 * kho_radix_del_page - Removes a page's preservation status from the radix tree.
 * @tree: The KHO radix tree.
 * @pfn: The page frame number of the page to unpreserve.
 * @order: The order of the page.
 *
 * This function traverses the radix tree and clears the bit corresponding to
 * the page, effectively removing its "preserved" status. It does not free
 * the tree's intermediate nodes, even if they become empty.
 */
void kho_radix_del_page(struct kho_radix_tree *tree, unsigned long pfn,
			unsigned int order)
{
	unsigned long key = kho_radix_encode_key(PFN_PHYS(pfn), order);
	struct kho_radix_node *node = tree->root;
	struct kho_radix_leaf *leaf;
	unsigned int i, idx;

	if (WARN_ON_ONCE(!tree->root))
		return;

	might_sleep();

	guard(mutex)(&tree->lock);

	/* Go from high levels to low levels */
	for (i = KHO_TREE_MAX_DEPTH - 1; i > 0; i--) {
		idx = kho_radix_get_table_index(key, i);

		/*
		 * We are attempting to delete a page that has not been
		 * preserved; return with a warning.
		 */
		if (WARN_ON(!node->table[idx]))
			return;

		node = phys_to_virt(node->table[idx]);
	}

	/* Handle the leaf level bitmap (level 0) */
	leaf = (struct kho_radix_leaf *)node;
	idx = kho_radix_get_bitmap_index(key);
	__clear_bit(idx, leaf->bitmap);
}
EXPORT_SYMBOL_GPL(kho_radix_del_page);

static int kho_radix_walk_leaf(struct kho_radix_leaf *leaf,
			       unsigned long key,
			       kho_radix_tree_walk_callback_t cb)
{
	unsigned long *bitmap = (unsigned long *)leaf;
	unsigned int order;
	phys_addr_t phys;
	unsigned int i;
	int err;

	for_each_set_bit(i, bitmap, PAGE_SIZE * BITS_PER_BYTE) {
		phys = kho_radix_decode_key(key | i, &order);
		err = cb(phys, order);
		if (err)
			return err;
	}

	return 0;
}

static int __kho_radix_walk_tree(struct kho_radix_node *root,
				 unsigned int level, unsigned long start,
				 kho_radix_tree_walk_callback_t cb)
{
	struct kho_radix_node *node;
	struct kho_radix_leaf *leaf;
	unsigned long key, i;
	unsigned int shift;
	int err;

	for (i = 0; i < PAGE_SIZE / sizeof(phys_addr_t); i++) {
		if (!root->table[i])
			continue;

		shift = ((level - 1) * KHO_TABLE_SIZE_LOG2) +
			KHO_BITMAP_SIZE_LOG2;
		key = start | (i << shift);

		node = phys_to_virt(root->table[i]);

		if (level == 1) {
			/*
			 * We are at level 1, so node points to the
			 * level 0 bitmap.
			 */
			leaf = (struct kho_radix_leaf *)node;
			err = kho_radix_walk_leaf(leaf, key, cb);
		} else {
			err = __kho_radix_walk_tree(node, level - 1,
						    key, cb);
		}

		if (err)
			return err;
	}

	return 0;
}
/**
 * kho_radix_walk_tree - Traverses the radix tree and calls a callback for each preserved page.
 * @tree: A pointer to the KHO radix tree to walk.
 * @cb: A callback function of type kho_radix_tree_walk_callback_t that will be
 * invoked for each preserved page found in the tree. The callback receives
 * the physical address and order of the preserved page.
 *
 * This function walks the radix tree from the top level down to the lowest
 * level (level 0). For each preserved page found, it invokes the provided
 * callback, passing the page's physical address and order.
 *
 * Return: 0 if the walk completed over the whole tree, or the non-zero return
 * value from the callback that stopped the walk.
 */
int kho_radix_walk_tree(struct kho_radix_tree *tree,
			kho_radix_tree_walk_callback_t cb)
{
	if (WARN_ON_ONCE(!tree->root))
		return -EINVAL;

	guard(mutex)(&tree->lock);

	return __kho_radix_walk_tree(tree->root, KHO_TREE_MAX_DEPTH - 1, 0, cb);
}
EXPORT_SYMBOL_GPL(kho_radix_walk_tree);
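
/*
 * Example walk callback (an illustrative sketch; "preserved_bytes" is a
 * hypothetical counter, not part of this file): tally how much memory is
 * currently marked for preservation. A non-zero return from the callback
 * stops the walk and is propagated back to the caller.
 *
 *	static int count_preserved(phys_addr_t phys, unsigned int order)
 *	{
 *		preserved_bytes += PAGE_SIZE << order;
 *		return 0;
 *	}
 *
 *	err = kho_radix_walk_tree(&kho_out.radix_tree, count_preserved);
 */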

static void __kho_unpreserve(struct kho_radix_tree *tree,
			     unsigned long pfn, unsigned long end_pfn)
{
	unsigned int order;

	while (pfn < end_pfn) {
		order = min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));

		kho_radix_del_page(tree, pfn, order);

		pfn += 1 << order;
	}
}
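
/*
 * The pfn/order split above mirrors the one done at preservation time: each
 * step removes the largest block that is both aligned to the current pfn and
 * does not run past end_pfn. E.g. for pfn = 0x103 and end_pfn = 0x110
 * (13 pages), the loop deletes an order-0 block at 0x103, an order-2 block
 * at 0x104 and an order-3 block at 0x108.
 */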

/* For physically contiguous 0-order pages. */
static void kho_init_pages(struct page *page, unsigned long nr_pages)
{
	for (unsigned long i = 0; i < nr_pages; i++) {
		set_page_count(page + i, 1);
		/* Clear each page's codetag to avoid accounting mismatch. */
		clear_page_tag_ref(page + i);
	}
}

static void kho_init_folio(struct page *page, unsigned int order)
{
	unsigned long nr_pages = (1 << order);

	/* Head page gets refcount of 1. */
	set_page_count(page, 1);
	/* Clear head page's codetag to avoid accounting mismatch. */
	clear_page_tag_ref(page);

	/* For higher order folios, tail pages get a page count of zero. */
	for (unsigned long i = 1; i < nr_pages; i++)
		set_page_count(page + i, 0);

	if (order > 0)
		prep_compound_page(page, order);
}

static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
{
	struct page *page = pfn_to_online_page(PHYS_PFN(phys));
	unsigned long nr_pages;
	union kho_page_info info;

	if (!page)
		return NULL;

	info.page_private = page->private;
	/*
	 * kho_preserved_memory_reserve() only sets the magic on the head
	 * page. This magic check also implicitly makes sure phys is
	 * order-aligned since for non-order-aligned phys addresses, magic
	 * will never be set.
	 */
	if (WARN_ON_ONCE(info.magic != KHO_PAGE_MAGIC))
		return NULL;
	nr_pages = (1 << info.order);

	/* Clear private to make sure later restores on this page error out. */
	page->private = 0;

	if (is_folio)
		kho_init_folio(page, info.order);
	else
		kho_init_pages(page, nr_pages);

	adjust_managed_page_count(page, nr_pages);
	return page;
}

/**
 * kho_restore_folio - recreates the folio from the preserved memory.
 * @phys: physical address of the folio.
 *
 * Return: pointer to the struct folio on success, NULL on failure.
 */
struct folio *kho_restore_folio(phys_addr_t phys)
{
	struct page *page = kho_restore_page(phys, true);

	return page ? page_folio(page) : NULL;
}
EXPORT_SYMBOL_GPL(kho_restore_folio);
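
/*
 * Typical round trip (a sketch; carrying @phys across kexec, e.g. inside a
 * KHO subtree blob, is the caller's responsibility):
 *
 * old kernel:
 *	err = kho_preserve_folio(folio);
 *	... stash folio_pfn(folio) << PAGE_SHIFT in handed-over state ...
 *
 * new kernel:
 *	struct folio *folio = kho_restore_folio(phys);
 *
 *	if (!folio)
 *		... @phys was not preserved as a folio head ...
 */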

/**
 * kho_restore_pages - restore list of contiguous order 0 pages.
 * @phys: physical address of the first page.
 * @nr_pages: number of pages.
 *
 * Restore a contiguous list of order 0 pages that was preserved with
 * kho_preserve_pages().
 *
 * Return: the first page on success, NULL on failure.
 */
struct page *kho_restore_pages(phys_addr_t phys, unsigned long nr_pages)
{
	const unsigned long start_pfn = PHYS_PFN(phys);
	const unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn = start_pfn;

	while (pfn < end_pfn) {
		const unsigned int order =
			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
		struct page *page = kho_restore_page(PFN_PHYS(pfn), false);

		if (!page)
			return NULL;
		pfn += 1 << order;
	}

	return pfn_to_page(start_pfn);
}
EXPORT_SYMBOL_GPL(kho_restore_pages);

static int __init kho_preserved_memory_reserve(phys_addr_t phys,
					       unsigned int order)
{
	union kho_page_info info;
	struct page *page;
	u64 sz;

	sz = 1 << (order + PAGE_SHIFT);
	page = phys_to_page(phys);

	/* Reserve the memory preserved in KHO in memblock */
	memblock_reserve(phys, sz);
	memblock_reserved_mark_noinit(phys, sz);
	info.magic = KHO_PAGE_MAGIC;
	info.order = order;
	page->private = info.page_private;

	return 0;
}

/* Returns physical address of the preserved memory map from FDT */
static phys_addr_t __init kho_get_mem_map_phys(const void *fdt)
{
	const void *mem_ptr;
	int len;

	mem_ptr = fdt_getprop(fdt, 0, KHO_FDT_MEMORY_MAP_PROP_NAME, &len);
	if (!mem_ptr || len != sizeof(u64)) {
		pr_err("failed to get preserved memory map\n");
		return 0;
	}

	return get_unaligned((const u64 *)mem_ptr);
}

/*
 * With KHO enabled, memory can become fragmented because KHO regions may
 * be anywhere in physical address space. The scratch regions give us
 * safe zones that we will never see KHO allocations from. We can later
 * safely load our new kexec images into them and then use the scratch
 * areas for early allocations that happen before the page allocator is
 * initialized.
 */
struct kho_scratch *kho_scratch;
unsigned int kho_scratch_cnt;

/*
 * The scratch areas are scaled by default as a percentage of memory
 * allocated from memblock. A user can override the scale with the command
 * line parameter:
 *
 * kho_scratch=N%
 *
 * It is also possible to explicitly define sizes for the lowmem, global and
 * per-node scratch areas:
 *
 * kho_scratch=l[KMG],n[KMG],m[KMG]
 *
 * The explicit size definition takes precedence over the scale definition.
 */
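
/*
 * Examples (illustrative values):
 *
 *	kho_scratch=30%		- scale every scratch area to 30% of the
 *				  kernel memory reserved via memblock
 *	kho_scratch=256M,1G,128M - 256MiB of lowmem, 1GiB of global and
 *				  128MiB of per-node scratch
 */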
static unsigned int scratch_scale __initdata = 200;
static phys_addr_t scratch_size_global __initdata;
static phys_addr_t scratch_size_pernode __initdata;
static phys_addr_t scratch_size_lowmem __initdata;

static int __init kho_parse_scratch_size(char *p)
{
	size_t len;
	unsigned long sizes[3];
	size_t total_size = 0;
	int i;

	if (!p)
		return -EINVAL;

	len = strlen(p);
	if (!len)
		return -EINVAL;

	/* parse nn% */
	if (p[len - 1] == '%') {
		/* unsigned int max is 4,294,967,295, 10 chars */
		char s_scale[11] = {};
		int ret = 0;

		if (len > ARRAY_SIZE(s_scale))
			return -EINVAL;

		memcpy(s_scale, p, len - 1);
		ret = kstrtouint(s_scale, 10, &scratch_scale);
		if (!ret)
			pr_notice("scratch scale is %d%%\n", scratch_scale);
		return ret;
	}

	/* parse ll[KMG],mm[KMG],nn[KMG] */
	for (i = 0; i < ARRAY_SIZE(sizes); i++) {
		char *endp = p;

		if (i > 0) {
			if (*p != ',')
				return -EINVAL;
			p += 1;
		}

		sizes[i] = memparse(p, &endp);
		if (endp == p)
			return -EINVAL;
		p = endp;
		total_size += sizes[i];
	}

	if (!total_size)
		return -EINVAL;

	/* The string should be fully consumed by now. */
	if (*p)
		return -EINVAL;

	scratch_size_lowmem = sizes[0];
	scratch_size_global = sizes[1];
	scratch_size_pernode = sizes[2];
	scratch_scale = 0;

	pr_notice("scratch areas: lowmem: %lluMiB global: %lluMiB pernode: %lluMiB\n",
		  (u64)(scratch_size_lowmem >> 20),
		  (u64)(scratch_size_global >> 20),
		  (u64)(scratch_size_pernode >> 20));

	return 0;
}
early_param("kho_scratch", kho_parse_scratch_size);

static void __init scratch_size_update(void)
{
	phys_addr_t size;

	if (!scratch_scale)
		return;

	size = memblock_reserved_kern_size(ARCH_LOW_ADDRESS_LIMIT,
					   NUMA_NO_NODE);
	size = size * scratch_scale / 100;
	scratch_size_lowmem = round_up(size, CMA_MIN_ALIGNMENT_BYTES);

	size = memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE,
					   NUMA_NO_NODE);
	size = size * scratch_scale / 100 - scratch_size_lowmem;
	scratch_size_global = round_up(size, CMA_MIN_ALIGNMENT_BYTES);
}

static phys_addr_t __init scratch_size_node(int nid)
{
	phys_addr_t size;

	if (scratch_scale) {
		size = memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE,
						   nid);
		size = size * scratch_scale / 100;
	} else {
		size = scratch_size_pernode;
	}

	return round_up(size, CMA_MIN_ALIGNMENT_BYTES);
}

/**
 * kho_reserve_scratch - Reserve a contiguous chunk of memory for kexec
 *
 * With KHO we can preserve arbitrary pages in the system. To ensure we still
 * have a large contiguous region of memory when we search the physical address
 * space for target memory, let's make sure we always have a large CMA region
 * active. This CMA region will only be used for movable pages which are not a
 * problem for us during KHO because we can just move them somewhere else.
 */
static void __init kho_reserve_scratch(void)
{
	phys_addr_t addr, size;
	int nid, i = 0;

	if (!kho_enable)
		return;

	scratch_size_update();

	/* FIXME: deal with node hot-plug/remove */
	kho_scratch_cnt = nodes_weight(node_states[N_MEMORY]) + 2;
	size = kho_scratch_cnt * sizeof(*kho_scratch);
	kho_scratch = memblock_alloc(size, PAGE_SIZE);
	if (!kho_scratch) {
		pr_err("Failed to reserve scratch array\n");
		goto err_disable_kho;
	}

	/*
	 * reserve scratch area in low memory for lowmem allocations in the
	 * next kernel
	 */
	size = scratch_size_lowmem;
	addr = memblock_phys_alloc_range(size, CMA_MIN_ALIGNMENT_BYTES, 0,
					 ARCH_LOW_ADDRESS_LIMIT);
	if (!addr) {
		pr_err("Failed to reserve lowmem scratch buffer\n");
		goto err_free_scratch_desc;
	}

	kho_scratch[i].addr = addr;
	kho_scratch[i].size = size;
	i++;

	/* reserve large contiguous area for allocations without nid */
	size = scratch_size_global;
	addr = memblock_phys_alloc(size, CMA_MIN_ALIGNMENT_BYTES);
	if (!addr) {
		pr_err("Failed to reserve global scratch buffer\n");
		goto err_free_scratch_areas;
	}

	kho_scratch[i].addr = addr;
	kho_scratch[i].size = size;
	i++;

	/*
	 * Loop over nodes that have both memory and are online. Skip
	 * memoryless nodes, as we cannot allocate scratch areas there.
	 */
	for_each_node_state(nid, N_MEMORY) {
		size = scratch_size_node(nid);
		addr = memblock_alloc_range_nid(size, CMA_MIN_ALIGNMENT_BYTES,
						0, MEMBLOCK_ALLOC_ACCESSIBLE,
						nid, true);
		if (!addr) {
			pr_err("Failed to reserve nid %d scratch buffer\n", nid);
			goto err_free_scratch_areas;
		}

		kho_scratch[i].addr = addr;
		kho_scratch[i].size = size;
		i++;
	}

	return;

err_free_scratch_areas:
	for (i--; i >= 0; i--)
		memblock_phys_free(kho_scratch[i].addr, kho_scratch[i].size);
err_free_scratch_desc:
	memblock_free(kho_scratch, kho_scratch_cnt * sizeof(*kho_scratch));
err_disable_kho:
	pr_warn("Failed to reserve scratch area, disabling kexec handover\n");
	kho_enable = false;
}

/**
 * kho_add_subtree - record the physical address of a sub blob in KHO root tree.
 * @name: name of the sub tree.
 * @blob: the sub tree blob.
 * @size: size of the blob in bytes.
 *
 * Creates a new child node named @name in the KHO root FDT and records
 * the physical address of @blob. The pages of @blob must also be preserved
 * by KHO for the new kernel to retrieve it after kexec.
 *
 * A debugfs blob entry is also created at
 * ``/sys/kernel/debug/kho/out/sub_fdts/@name`` when the kernel is configured
 * with CONFIG_KEXEC_HANDOVER_DEBUGFS.
 *
 * Return: 0 on success, error code on failure
 */
int kho_add_subtree(const char *name, void *blob, size_t size)
{
	phys_addr_t phys = virt_to_phys(blob);
	void *root_fdt = kho_out.fdt;
	u64 size_u64 = size;
	int err = -ENOMEM;
	int off, fdt_err;

	guard(mutex)(&kho_out.lock);

	fdt_err = fdt_open_into(root_fdt, root_fdt, PAGE_SIZE);
	if (fdt_err < 0)
		return err;

	off = fdt_add_subnode(root_fdt, 0, name);
	if (off < 0) {
		if (off == -FDT_ERR_EXISTS)
			err = -EEXIST;
		goto out_pack;
	}

	fdt_err = fdt_setprop(root_fdt, off, KHO_SUB_TREE_PROP_NAME,
			      &phys, sizeof(phys));
	if (fdt_err < 0)
		goto out_del_node;

	fdt_err = fdt_setprop(root_fdt, off, KHO_SUB_TREE_SIZE_PROP_NAME,
			      &size_u64, sizeof(size_u64));
	if (fdt_err < 0)
		goto out_del_node;

	WARN_ON_ONCE(kho_debugfs_blob_add(&kho_out.dbg, name, blob,
					  size, false));

	err = 0;
	goto out_pack;

out_del_node:
	fdt_del_node(root_fdt, off);
out_pack:
	fdt_pack(root_fdt);

	return err;
}
EXPORT_SYMBOL_GPL(kho_add_subtree);
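
/*
 * Example flow for a subsystem handing over state of its own (a sketch;
 * the "myfs" name and the one-page blob size are hypothetical):
 *
 *	void *blob = kho_alloc_preserve(PAGE_SIZE);
 *
 *	if (IS_ERR(blob))
 *		return PTR_ERR(blob);
 *	... fill the blob with serialized subsystem state ...
 *	err = kho_add_subtree("myfs", blob, PAGE_SIZE);
 *
 * After kexec, the new kernel finds the blob again with
 * kho_retrieve_subtree("myfs", &phys, &size).
 */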

void kho_remove_subtree(void *blob)
{
	phys_addr_t target_phys = virt_to_phys(blob);
	void *root_fdt = kho_out.fdt;
	int off;
	int err;

	guard(mutex)(&kho_out.lock);

	err = fdt_open_into(root_fdt, root_fdt, PAGE_SIZE);
	if (err < 0)
		return;

	for (off = fdt_first_subnode(root_fdt, 0); off >= 0;
	     off = fdt_next_subnode(root_fdt, off)) {
		const u64 *val;
		int len;

		val = fdt_getprop(root_fdt, off, KHO_SUB_TREE_PROP_NAME, &len);
		if (!val || len != sizeof(phys_addr_t))
			continue;

		if ((phys_addr_t)*val == target_phys) {
			fdt_del_node(root_fdt, off);
			kho_debugfs_blob_remove(&kho_out.dbg, blob);
			break;
		}
	}

	fdt_pack(root_fdt);
}
EXPORT_SYMBOL_GPL(kho_remove_subtree);

/**
 * kho_preserve_folio - preserve a folio across kexec.
 * @folio: folio to preserve.
 *
 * Instructs KHO to preserve the whole folio across kexec. The order
 * will be preserved as well.
 *
 * Return: 0 on success, error code on failure
 */
int kho_preserve_folio(struct folio *folio)
{
	struct kho_radix_tree *tree = &kho_out.radix_tree;
	const unsigned long pfn = folio_pfn(folio);
	const unsigned int order = folio_order(folio);

	if (WARN_ON(kho_scratch_overlap(pfn << PAGE_SHIFT, PAGE_SIZE << order)))
		return -EINVAL;

	return kho_radix_add_page(tree, pfn, order);
}
EXPORT_SYMBOL_GPL(kho_preserve_folio);

/**
 * kho_unpreserve_folio - unpreserve a folio.
 * @folio: folio to unpreserve.
 *
 * Instructs KHO to unpreserve a folio that was preserved by
 * kho_preserve_folio() before. The provided @folio (pfn and order)
 * must exactly match a previously preserved folio.
 */
void kho_unpreserve_folio(struct folio *folio)
{
	struct kho_radix_tree *tree = &kho_out.radix_tree;
	const unsigned long pfn = folio_pfn(folio);
	const unsigned int order = folio_order(folio);

	kho_radix_del_page(tree, pfn, order);
}
EXPORT_SYMBOL_GPL(kho_unpreserve_folio);

/**
 * kho_preserve_pages - preserve contiguous pages across kexec
 * @page: first page in the list.
 * @nr_pages: number of pages.
 *
 * Preserve a contiguous list of order 0 pages. Must be restored using
 * kho_restore_pages() to ensure the pages are restored properly as order 0.
 *
 * Return: 0 on success, error code on failure
 */
int kho_preserve_pages(struct page *page, unsigned long nr_pages)
{
	struct kho_radix_tree *tree = &kho_out.radix_tree;
	const unsigned long start_pfn = page_to_pfn(page);
	const unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn = start_pfn;
	unsigned long failed_pfn = 0;
	int err = 0;

	if (WARN_ON(kho_scratch_overlap(start_pfn << PAGE_SHIFT,
					nr_pages << PAGE_SHIFT))) {
		return -EINVAL;
	}

	while (pfn < end_pfn) {
		unsigned int order =
			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));

		/*
		 * Make sure all the pages in a single preservation are in the
		 * same NUMA node. The restore machinery cannot cope with a
		 * preservation spanning multiple NUMA nodes.
		 */
		while (pfn_to_nid(pfn) != pfn_to_nid(pfn + (1UL << order) - 1))
			order--;

		err = kho_radix_add_page(tree, pfn, order);
		if (err) {
			failed_pfn = pfn;
			break;
		}

		pfn += 1 << order;
	}

	if (err)
		__kho_unpreserve(tree, start_pfn, failed_pfn);

	return err;
}
EXPORT_SYMBOL_GPL(kho_preserve_pages);

/**
 * kho_unpreserve_pages - unpreserve contiguous pages.
 * @page: first page in the list.
 * @nr_pages: number of pages.
 *
 * Instructs KHO to unpreserve @nr_pages contiguous pages starting from @page.
 * This must be called with the same @page and @nr_pages as the corresponding
 * kho_preserve_pages() call. Unpreserving arbitrary sub-ranges of larger
 * preserved blocks is not supported.
 */
void kho_unpreserve_pages(struct page *page, unsigned long nr_pages)
{
	struct kho_radix_tree *tree = &kho_out.radix_tree;
	const unsigned long start_pfn = page_to_pfn(page);
	const unsigned long end_pfn = start_pfn + nr_pages;

	__kho_unpreserve(tree, start_pfn, end_pfn);
}
EXPORT_SYMBOL_GPL(kho_unpreserve_pages);

/* vmalloc flags KHO supports */
#define KHO_VMALLOC_SUPPORTED_FLAGS	(VM_ALLOC | VM_ALLOW_HUGE_VMAP)

/* KHO internal flags for vmalloc preservations */
#define KHO_VMALLOC_ALLOC	0x0001
#define KHO_VMALLOC_HUGE_VMAP	0x0002

static unsigned short vmalloc_flags_to_kho(unsigned int vm_flags)
{
	unsigned short kho_flags = 0;

	if (vm_flags & VM_ALLOC)
		kho_flags |= KHO_VMALLOC_ALLOC;
	if (vm_flags & VM_ALLOW_HUGE_VMAP)
		kho_flags |= KHO_VMALLOC_HUGE_VMAP;

	return kho_flags;
}

static unsigned int kho_flags_to_vmalloc(unsigned short kho_flags)
{
	unsigned int vm_flags = 0;

	if (kho_flags & KHO_VMALLOC_ALLOC)
		vm_flags |= VM_ALLOC;
	if (kho_flags & KHO_VMALLOC_HUGE_VMAP)
		vm_flags |= VM_ALLOW_HUGE_VMAP;

	return vm_flags;
}

static struct kho_vmalloc_chunk *new_vmalloc_chunk(struct kho_vmalloc_chunk *cur)
{
	struct kho_vmalloc_chunk *chunk;
	int err;

	chunk = (struct kho_vmalloc_chunk *)get_zeroed_page(GFP_KERNEL);
	if (!chunk)
		return NULL;

	err = kho_preserve_pages(virt_to_page(chunk), 1);
	if (err)
		goto err_free;
	if (cur)
		KHOSER_STORE_PTR(cur->hdr.next, chunk);
	return chunk;

err_free:
	free_page((unsigned long)chunk);
	return NULL;
}

static void kho_vmalloc_unpreserve_chunk(struct kho_vmalloc_chunk *chunk,
					 unsigned short order)
{
	struct kho_radix_tree *tree = &kho_out.radix_tree;
	unsigned long pfn = PHYS_PFN(virt_to_phys(chunk));

	__kho_unpreserve(tree, pfn, pfn + 1);

	for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
		pfn = PHYS_PFN(chunk->phys[i]);
		__kho_unpreserve(tree, pfn, pfn + (1 << order));
	}
}

/**
 * kho_preserve_vmalloc - preserve memory allocated with vmalloc() across kexec
 * @ptr: pointer to the area in vmalloc address space
 * @preservation: placeholder for preservation metadata
 *
 * Instructs KHO to preserve the area in vmalloc address space at @ptr. The
 * physical pages mapped at @ptr will be preserved and on successful return
 * @preservation will hold the physical address of a structure that describes
 * the preservation.
 *
 * NOTE: Memory allocated with the vmalloc_node() variants cannot be reliably
 * restored on the same node.
 *
 * Return: 0 on success, error code on failure
 */
int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation)
{
	struct kho_vmalloc_chunk *chunk;
	struct vm_struct *vm = find_vm_area(ptr);
	unsigned int order, flags, nr_contig_pages;
	unsigned int idx = 0;
	int err;

	if (!vm)
		return -EINVAL;

	if (vm->flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
		return -EOPNOTSUPP;

	flags = vmalloc_flags_to_kho(vm->flags);
	order = get_vm_area_page_order(vm);

	chunk = new_vmalloc_chunk(NULL);
	if (!chunk)
		return -ENOMEM;
	KHOSER_STORE_PTR(preservation->first, chunk);

	nr_contig_pages = (1 << order);
	for (int i = 0; i < vm->nr_pages; i += nr_contig_pages) {
		phys_addr_t phys = page_to_phys(vm->pages[i]);

		err = kho_preserve_pages(vm->pages[i], nr_contig_pages);
		if (err)
			goto err_free;

		chunk->phys[idx++] = phys;
		if (idx == ARRAY_SIZE(chunk->phys)) {
			chunk = new_vmalloc_chunk(chunk);
			if (!chunk) {
				err = -ENOMEM;
				goto err_free;
			}
			idx = 0;
		}
	}

	preservation->total_pages = vm->nr_pages;
	preservation->flags = flags;
	preservation->order = order;

	return 0;

err_free:
	kho_unpreserve_vmalloc(preservation);
	return err;
}
EXPORT_SYMBOL_GPL(kho_preserve_vmalloc);

/**
 * kho_unpreserve_vmalloc - unpreserve memory allocated with vmalloc()
 * @preservation: preservation metadata returned by kho_preserve_vmalloc()
 *
 * Instructs KHO to unpreserve the area in vmalloc address space that was
 * previously preserved with kho_preserve_vmalloc().
 */
void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation)
{
	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first);

	while (chunk) {
		struct kho_vmalloc_chunk *tmp = chunk;

		kho_vmalloc_unpreserve_chunk(chunk, preservation->order);

		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
		free_page((unsigned long)tmp);
	}
}
EXPORT_SYMBOL_GPL(kho_unpreserve_vmalloc);

/**
 * kho_restore_vmalloc - recreates and populates an area in vmalloc address
 * space from the preserved memory.
 * @preservation: preservation metadata.
 *
 * Recreates an area in vmalloc address space and populates it with memory that
 * was preserved using kho_preserve_vmalloc().
 *
 * Return: pointer to the area in the vmalloc address space, NULL on failure.
 */
void *kho_restore_vmalloc(const struct kho_vmalloc *preservation)
{
	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first);
	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_PROT_NORMAL;
	unsigned int align, order, shift, vm_flags;
	unsigned long total_pages, contig_pages;
	unsigned long addr, size;
	struct vm_struct *area;
	struct page **pages;
	unsigned int idx = 0;
	int err;

	vm_flags = kho_flags_to_vmalloc(preservation->flags);
	if (vm_flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
		return NULL;

	total_pages = preservation->total_pages;
	pages = kvmalloc_objs(*pages, total_pages);
	if (!pages)
		return NULL;
	order = preservation->order;
	contig_pages = (1 << order);
	shift = PAGE_SHIFT + order;
	align = 1 << shift;

	while (chunk) {
		struct page *page;

		for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
			phys_addr_t phys = chunk->phys[i];

			if (idx + contig_pages > total_pages)
				goto err_free_pages_array;

			page = kho_restore_pages(phys, contig_pages);
			if (!page)
				goto err_free_pages_array;

			for (int j = 0; j < contig_pages; j++)
				pages[idx++] = page + j;

			phys += contig_pages * PAGE_SIZE;
		}

		page = kho_restore_pages(virt_to_phys(chunk), 1);
		if (!page)
			goto err_free_pages_array;
		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
		__free_page(page);
	}

	if (idx != total_pages)
		goto err_free_pages_array;

	area = __get_vm_area_node(total_pages * PAGE_SIZE, align, shift,
				  vm_flags | VM_UNINITIALIZED,
				  VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
	if (!area)
		goto err_free_pages_array;

	addr = (unsigned long)area->addr;
	size = get_vm_area_size(area);
	err = vmap_pages_range(addr, addr + size, PAGE_KERNEL, pages, shift);
	if (err)
		goto err_free_vm_area;

	area->nr_pages = total_pages;
	area->pages = pages;

	if (vm_flags & VM_ALLOC)
		kasan_flags |= KASAN_VMALLOC_VM_ALLOC;

	area->addr = kasan_unpoison_vmalloc(area->addr, total_pages * PAGE_SIZE,
					    kasan_flags);
	clear_vm_uninitialized_flag(area);

	return area->addr;

err_free_vm_area:
	free_vm_area(area);
err_free_pages_array:
	kvfree(pages);
	return NULL;
}
EXPORT_SYMBOL_GPL(kho_restore_vmalloc);
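
/*
 * End-to-end vmalloc handover (a sketch; carrying the struct kho_vmalloc
 * across kexec, e.g. inside a KHO subtree blob, is the caller's job):
 *
 * old kernel:
 *	void *buf = vmalloc(SZ_1M);
 *	struct kho_vmalloc pres;
 *
 *	err = kho_preserve_vmalloc(buf, &pres);
 *
 * new kernel:
 *	void *buf = kho_restore_vmalloc(&pres);
 *
 *	if (!buf)
 *		... the preservation was damaged or incomplete ...
 */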

/**
 * kho_alloc_preserve - Allocate, zero, and preserve memory.
 * @size: The number of bytes to allocate.
 *
 * Allocates a physically contiguous block of zeroed pages that is large
 * enough to hold @size bytes. The allocated memory is then registered with
 * KHO for preservation across a kexec.
 *
 * Note: The actual allocated size will be rounded up to the next
 * power-of-two number of pages.
 *
 * Return: A virtual pointer to the allocated and preserved memory on success,
 * or an ERR_PTR() encoded error on failure.
 */
void *kho_alloc_preserve(size_t size)
{
	struct folio *folio;
	int order, ret;

	if (!size)
		return ERR_PTR(-EINVAL);

	order = get_order(size);
	if (order > MAX_PAGE_ORDER)
		return ERR_PTR(-E2BIG);

	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, order);
	if (!folio)
		return ERR_PTR(-ENOMEM);

	ret = kho_preserve_folio(folio);
	if (ret) {
		folio_put(folio);
		return ERR_PTR(ret);
	}

	return folio_address(folio);
}
EXPORT_SYMBOL_GPL(kho_alloc_preserve);
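
/*
 * Lifecycle sketch for kho_alloc_preserve() and the two helpers below: the
 * old kernel pairs kho_alloc_preserve() with kho_unpreserve_free() if it
 * changes its mind before kexec; otherwise the new kernel calls
 * kho_restore_free() once it is done with the handed-over data.
 *
 * old kernel:
 *	void *mem = kho_alloc_preserve(PAGE_SIZE);
 *
 *	if (IS_ERR(mem))
 *		return PTR_ERR(mem);
 *	... publish virt_to_phys(mem), e.g. via a KHO subtree ...
 *
 * new kernel:
 *	void *mem = phys_to_virt(phys);
 *
 *	... consume the handed-over data ...
 *	kho_restore_free(mem);
 */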

/**
 * kho_unpreserve_free - Unpreserve and free memory.
 * @mem: Pointer to the memory allocated by kho_alloc_preserve().
 *
 * Unregisters the memory from KHO preservation and frees the underlying
 * pages back to the system. This function should be called to clean up
 * memory allocated with kho_alloc_preserve().
 */
void kho_unpreserve_free(void *mem)
{
	struct folio *folio;

	if (!mem)
		return;

	folio = virt_to_folio(mem);
	kho_unpreserve_folio(folio);
	folio_put(folio);
}
EXPORT_SYMBOL_GPL(kho_unpreserve_free);

/**
 * kho_restore_free - Restore and free memory after kexec.
 * @mem: Pointer to the memory (in the new kernel's address space)
 * that was allocated by the old kernel.
 *
 * This function is intended to be called in the new kernel (post-kexec)
 * to take ownership of and free a memory region that was preserved by the
 * old kernel using kho_alloc_preserve().
 *
 * It first restores the pages from KHO (using their physical address)
 * and then frees the pages back to the new kernel's page allocator.
 */
void kho_restore_free(void *mem)
{
	struct folio *folio;

	if (!mem)
		return;

	folio = kho_restore_folio(__pa(mem));
	if (!WARN_ON(!folio))
		folio_put(folio);
}
EXPORT_SYMBOL_GPL(kho_restore_free);

struct kho_in {
	phys_addr_t fdt_phys;
	phys_addr_t scratch_phys;
	char previous_release[__NEW_UTS_LEN + 1];
	u32 kexec_count;
	struct kho_debugfs dbg;
};

static struct kho_in kho_in = {};

static const void *kho_get_fdt(void)
{
	return kho_in.fdt_phys ? phys_to_virt(kho_in.fdt_phys) : NULL;
}

/**
 * is_kho_boot - check if current kernel was booted via KHO-enabled
 * kexec
 *
 * This function checks if the current kernel was loaded through a kexec
 * operation with KHO enabled, by verifying that a valid KHO FDT
 * was passed.
 *
 * Note: This function returns reliable results only after
 * kho_populate() has been called during early boot. Before that,
 * it may return false even if KHO data is present.
 *
 * Return: true if booted via KHO-enabled kexec, false otherwise
 */
bool is_kho_boot(void)
{
	return !!kho_get_fdt();
}
EXPORT_SYMBOL_GPL(is_kho_boot);

/**
 * kho_retrieve_subtree - retrieve a preserved sub blob by its name.
 * @name: the name of the sub blob passed to kho_add_subtree().
 * @phys: if found, the physical address of the sub blob is stored in @phys.
 * @size: if not NULL and found, the size of the sub blob is stored in @size.
 *
 * Retrieve a preserved sub blob named @name and store its physical
 * address in @phys and optionally its size in @size.
 *
 * Return: 0 on success, error code on failure
 */
int kho_retrieve_subtree(const char *name, phys_addr_t *phys, size_t *size)
{
	const void *fdt = kho_get_fdt();
	const u64 *val;
	int offset, len;

	if (!fdt)
		return -ENOENT;

	if (!phys)
		return -EINVAL;

	offset = fdt_subnode_offset(fdt, 0, name);
	if (offset < 0)
		return -ENOENT;

	val = fdt_getprop(fdt, offset, KHO_SUB_TREE_PROP_NAME, &len);
	if (!val || len != sizeof(*val))
		return -EINVAL;

	*phys = (phys_addr_t)*val;

	val = fdt_getprop(fdt, offset, KHO_SUB_TREE_SIZE_PROP_NAME, &len);
	if (!val || len != sizeof(*val)) {
		pr_warn("broken KHO subnode '%s': missing or invalid blob-size property\n",
			name);
		return -EINVAL;
	}

	if (size)
		*size = (size_t)*val;

	return 0;
}
EXPORT_SYMBOL_GPL(kho_retrieve_subtree);
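
/*
 * Counterpart to the kho_add_subtree() example above (a sketch; "myfs" is
 * the same hypothetical name):
 *
 *	phys_addr_t phys;
 *	size_t size;
 *
 *	err = kho_retrieve_subtree("myfs", &phys, &size);
 *	if (!err)
 *		state = phys_to_virt(phys);
 */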

static int __init kho_mem_retrieve(const void *fdt)
{
	struct kho_radix_tree tree;
	const phys_addr_t *mem;
	int len;

	/* Retrieve the KHO radix tree from the passed-in FDT. */
	mem = fdt_getprop(fdt, 0, KHO_FDT_MEMORY_MAP_PROP_NAME, &len);

	if (!mem || len != sizeof(*mem)) {
		pr_err("failed to get preserved KHO memory tree\n");
		return -ENOENT;
	}

	if (!*mem)
		return -EINVAL;

	tree.root = phys_to_virt(*mem);
	mutex_init(&tree.lock);
	return kho_radix_walk_tree(&tree, kho_preserved_memory_reserve);
}

static __init int kho_out_fdt_setup(void)
{
	struct kho_radix_tree *tree = &kho_out.radix_tree;
	void *root = kho_out.fdt;
	u64 preserved_mem_tree_pa;
	int err;

	err = fdt_create(root, PAGE_SIZE);
	err |= fdt_finish_reservemap(root);
	err |= fdt_begin_node(root, "");
	err |= fdt_property_string(root, "compatible", KHO_FDT_COMPATIBLE);

	preserved_mem_tree_pa = virt_to_phys(tree->root);

	err |= fdt_property(root, KHO_FDT_MEMORY_MAP_PROP_NAME,
			    &preserved_mem_tree_pa,
			    sizeof(preserved_mem_tree_pa));

	err |= fdt_end_node(root);
	err |= fdt_finish(root);

	return err;
}

static void __init kho_in_kexec_metadata(void)
{
	struct kho_kexec_metadata *metadata;
	phys_addr_t metadata_phys;
	size_t blob_size;
	int err;

	err = kho_retrieve_subtree(KHO_METADATA_NODE_NAME, &metadata_phys,
				   &blob_size);
	if (err)
		/* This is fine, the previous kernel didn't export metadata */
		return;

	/* Check that, at least, "version" is present */
	if (blob_size < sizeof(u32)) {
		pr_warn("kexec-metadata blob too small (%zu bytes)\n",
			blob_size);
		return;
	}

	metadata = phys_to_virt(metadata_phys);

	if (metadata->version != KHO_KEXEC_METADATA_VERSION) {
		pr_warn("kexec-metadata version %u not supported (expected %u)\n",
			metadata->version, KHO_KEXEC_METADATA_VERSION);
		return;
	}

	if (blob_size < sizeof(*metadata)) {
		pr_warn("kexec-metadata blob too small for v%u (%zu < %zu)\n",
			metadata->version, blob_size, sizeof(*metadata));
		return;
	}

	/*
	 * Copy the data to kernel structures that will persist for the
	 * kernel's lifetime.
	 */
	kho_in.kexec_count = metadata->kexec_count;
	strscpy(kho_in.previous_release, metadata->previous_release,
		sizeof(kho_in.previous_release));

	pr_info("kexec from: %s (count %u)\n",
		kho_in.previous_release, kho_in.kexec_count);
}

/*
 * Create kexec metadata to pass the kernel version and boot count to the
 * next kernel. This keeps the core KHO ABI minimal and allows the
 * metadata format to evolve independently.
 */
static __init int kho_out_kexec_metadata(void)
{
	struct kho_kexec_metadata *metadata;
	int err;

	metadata = kho_alloc_preserve(sizeof(*metadata));
	if (IS_ERR(metadata))
		return PTR_ERR(metadata);

	metadata->version = KHO_KEXEC_METADATA_VERSION;
	strscpy(metadata->previous_release, init_uts_ns.name.release,
		sizeof(metadata->previous_release));
	/* kho_in.kexec_count is set to 0 on cold boot */
	metadata->kexec_count = kho_in.kexec_count + 1;

	err = kho_add_subtree(KHO_METADATA_NODE_NAME, metadata,
			      sizeof(*metadata));
	if (err)
		kho_unpreserve_free(metadata);

	return err;
}

static int __init kho_kexec_metadata_init(const void *fdt)
{
	int err;

	if (fdt)
		kho_in_kexec_metadata();

	/* Populate kexec metadata for the possible next kexec */
	err = kho_out_kexec_metadata();
	if (err)
		pr_warn("failed to initialize kexec-metadata subtree: %d\n",
			err);

	return err;
}

static __init int kho_init(void)
{
	struct kho_radix_tree *tree = &kho_out.radix_tree;
	const void *fdt = kho_get_fdt();
	int err = 0;

	if (!kho_enable)
		return 0;

	tree->root = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!tree->root) {
		err = -ENOMEM;
		goto err_free_scratch;
	}

	kho_out.fdt = kho_alloc_preserve(PAGE_SIZE);
	if (IS_ERR(kho_out.fdt)) {
		err = PTR_ERR(kho_out.fdt);
		goto err_free_kho_radix_tree_root;
	}

	err = kho_debugfs_init();
	if (err)
		goto err_free_fdt;

	err = kho_out_debugfs_init(&kho_out.dbg);
	if (err)
		goto err_free_fdt;

	err = kho_out_fdt_setup();
	if (err)
		goto err_free_fdt;

	err = kho_kexec_metadata_init(fdt);
	if (err)
		goto err_free_fdt;

	if (fdt) {
		kho_in_debugfs_init(&kho_in.dbg, fdt);
		return 0;
	}

	for (int i = 0; i < kho_scratch_cnt; i++) {
		unsigned long base_pfn = PHYS_PFN(kho_scratch[i].addr);
		unsigned long count = kho_scratch[i].size >> PAGE_SHIFT;
		unsigned long pfn;

		/*
		 * When debug_pagealloc is enabled, __free_pages() clears the
		 * corresponding PRESENT bit in the kernel page table.
		 * Subsequent kmemleak scans of these pages then cause
		 * non-PRESENT page faults.
		 * Mark scratch areas with kmemleak_ignore_phys() to exclude
		 * them from kmemleak scanning.
		 */
		kmemleak_ignore_phys(kho_scratch[i].addr);
		for (pfn = base_pfn; pfn < base_pfn + count;
		     pfn += pageblock_nr_pages)
			init_cma_reserved_pageblock(pfn_to_page(pfn));
	}

	WARN_ON_ONCE(kho_debugfs_blob_add(&kho_out.dbg, "fdt",
					  kho_out.fdt,
					  fdt_totalsize(kho_out.fdt), true));

	return 0;

err_free_fdt:
	kho_unpreserve_free(kho_out.fdt);
err_free_kho_radix_tree_root:
	kfree(tree->root);
	tree->root = NULL;
err_free_scratch:
	kho_out.fdt = NULL;
	for (int i = 0; i < kho_scratch_cnt; i++) {
		void *start = __va(kho_scratch[i].addr);
		void *end = start + kho_scratch[i].size;

		free_reserved_area(start, end, -1, "");
	}
	kho_enable = false;
	return err;
}
fs_initcall(kho_init);

static void __init kho_release_scratch(void)
{
	phys_addr_t start, end;
	u64 i;

	memmap_init_kho_scratch_pages();

	/*
	 * Mark scratch mem as CMA before we return it. That way we
	 * ensure that no kernel allocations happen on it. That means
	 * we can reuse it as scratch memory again later.
	 */
	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
			     MEMBLOCK_KHO_SCRATCH, &start, &end, NULL) {
		ulong start_pfn = pageblock_start_pfn(PFN_DOWN(start));
		ulong end_pfn = pageblock_align(PFN_UP(end));
		ulong pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages)
			init_pageblock_migratetype(pfn_to_page(pfn),
						   MIGRATE_CMA, false);
	}
}

void __init kho_memory_init(void)
{
	if (kho_in.scratch_phys) {
		kho_scratch = phys_to_virt(kho_in.scratch_phys);
		kho_release_scratch();

		if (kho_mem_retrieve(kho_get_fdt()))
			kho_in.fdt_phys = 0;
	} else {
		kho_reserve_scratch();
	}
}

void __init kho_populate(phys_addr_t fdt_phys, u64 fdt_len,
			 phys_addr_t scratch_phys, u64 scratch_len)
{
	unsigned int scratch_cnt = scratch_len / sizeof(*kho_scratch);
	struct kho_scratch *scratch = NULL;
	phys_addr_t mem_map_phys;
	void *fdt = NULL;
	bool populated = false;
	int err;

	/* Validate the input FDT */
	fdt = early_memremap(fdt_phys, fdt_len);
	if (!fdt) {
		pr_warn("setup: failed to memremap FDT (0x%llx)\n", fdt_phys);
		goto report;
	}
	err = fdt_check_header(fdt);
	if (err) {
		pr_warn("setup: handover FDT (0x%llx) is invalid: %d\n",
			fdt_phys, err);
		goto unmap_fdt;
	}
	err = fdt_node_check_compatible(fdt, 0, KHO_FDT_COMPATIBLE);
	if (err) {
		pr_warn("setup: handover FDT (0x%llx) is incompatible with '%s': %d\n",
			fdt_phys, KHO_FDT_COMPATIBLE, err);
		goto unmap_fdt;
	}

	mem_map_phys = kho_get_mem_map_phys(fdt);
	if (!mem_map_phys)
		goto unmap_fdt;

	scratch = early_memremap(scratch_phys, scratch_len);
	if (!scratch) {
		pr_warn("setup: failed to memremap scratch (phys=0x%llx, len=%llu)\n",
			scratch_phys, scratch_len);
		goto unmap_fdt;
	}

	/*
	 * We pass safe contiguous blocks of memory to use for early boot
	 * purposes from the previous kernel so that we can resize the
	 * memblock array as needed.
	 */
	for (int i = 0; i < scratch_cnt; i++) {
		struct kho_scratch *area = &scratch[i];
		u64 size = area->size;

		memblock_add(area->addr, size);
		err = memblock_mark_kho_scratch(area->addr, size);
		if (err) {
			pr_warn("failed to mark the scratch region 0x%pa+0x%pa: %pe",
				&area->addr, &size, ERR_PTR(err));
			goto unmap_scratch;
		}
		pr_debug("Marked 0x%pa+0x%pa as scratch", &area->addr, &size);
	}

	memblock_reserve(scratch_phys, scratch_len);

	/*
	 * Now that we have a viable region of scratch memory, let's tell
	 * the memblock allocator to only use that for any allocations.
	 * That way we ensure that nothing scribbles over in-use data while
	 * we initialize the page tables, which we will need to ingest all
	 * memory reservations from the previous kernel.
	 */
	memblock_set_kho_scratch_only();

	kho_in.fdt_phys = fdt_phys;
	kho_in.scratch_phys = scratch_phys;
	kho_scratch_cnt = scratch_cnt;

	populated = true;
	pr_info("found kexec handover data.\n");

unmap_scratch:
	early_memunmap(scratch, scratch_len);
unmap_fdt:
	early_memunmap(fdt, fdt_len);
report:
	if (!populated)
		pr_warn("disabling KHO revival\n");
}

/* Helper functions for kexec_file_load */

int kho_fill_kimage(struct kimage *image)
{
	ssize_t scratch_size;
	int err = 0;
	struct kexec_buf scratch;

	if (!kho_enable)
		return 0;

	image->kho.fdt = virt_to_phys(kho_out.fdt);

	scratch_size = sizeof(*kho_scratch) * kho_scratch_cnt;
	scratch = (struct kexec_buf){
		.image = image,
		.buffer = kho_scratch,
		.bufsz = scratch_size,
		.mem = KEXEC_BUF_MEM_UNKNOWN,
		.memsz = scratch_size,
		.buf_align = SZ_64K, /* Makes it easier to map */
		.buf_max = ULONG_MAX,
		.top_down = true,
	};
	err = kexec_add_buffer(&scratch);
	if (err)
		return err;
	image->kho.scratch = &image->segment[image->nr_segments - 1];

	return 0;
}

static int kho_walk_scratch(struct kexec_buf *kbuf,
			    int (*func)(struct resource *, void *))
{
	int ret = 0;
	int i;

	for (i = 0; i < kho_scratch_cnt; i++) {
		struct resource res = {
			.start = kho_scratch[i].addr,
			.end = kho_scratch[i].addr + kho_scratch[i].size - 1,
		};

		/* Try to fit the kimage into our KHO scratch region */
		ret = func(&res, kbuf);
		if (ret)
			break;
	}

	return ret;
}

int kho_locate_mem_hole(struct kexec_buf *kbuf,
			int (*func)(struct resource *, void *))
{
	int ret;

	if (!kho_enable || kbuf->image->type == KEXEC_TYPE_CRASH)
		return 1;

	ret = kho_walk_scratch(kbuf, func);

	return ret == 1 ? 0 : -EADDRNOTAVAIL;
}