1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * kexec_handover.c - kexec handover metadata processing
4 * Copyright (C) 2023 Alexander Graf <graf@amazon.com>
5 * Copyright (C) 2025 Microsoft Corporation, Mike Rapoport <rppt@kernel.org>
6 * Copyright (C) 2025 Google LLC, Changyuan Lyu <changyuanl@google.com>
7 * Copyright (C) 2025 Pasha Tatashin <pasha.tatashin@soleen.com>
8 */
9
10 #define pr_fmt(fmt) "KHO: " fmt
11
12 #include <linux/cleanup.h>
13 #include <linux/cma.h>
14 #include <linux/kmemleak.h>
15 #include <linux/count_zeros.h>
16 #include <linux/kexec.h>
17 #include <linux/kexec_handover.h>
18 #include <linux/libfdt.h>
19 #include <linux/list.h>
20 #include <linux/memblock.h>
21 #include <linux/page-isolation.h>
22 #include <linux/unaligned.h>
23 #include <linux/vmalloc.h>
24
25 #include <asm/early_ioremap.h>
26
28 /*
29 * KHO is tightly coupled with mm init and needs access to some of mm
30 * internal APIs.
31 */
32 #include "../../mm/internal.h"
33 #include "../kexec_internal.h"
34 #include "kexec_handover_internal.h"
35
36 #define KHO_FDT_COMPATIBLE "kho-v1"
37 #define PROP_PRESERVED_MEMORY_MAP "preserved-memory-map"
38 #define PROP_SUB_FDT "fdt"
39
40 #define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */
41
42 /*
43 * KHO uses page->private, which is an unsigned long, to store page metadata.
44 * Use it to store both the magic and the order.
45 */
46 union kho_page_info {
47 unsigned long page_private;
48 struct {
49 unsigned int order;
50 unsigned int magic;
51 };
52 };
53
54 static_assert(sizeof(union kho_page_info) == sizeof(((struct page *)0)->private));
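/*
 * Illustrative sketch of the packing (the order value below is made up):
 *
 *	union kho_page_info info;
 *
 *	info.magic = KHO_PAGE_MAGIC;
 *	info.order = 3;
 *	page->private = info.page_private;
 *
 * kho_restore_page() later reads page->private back through the same union
 * and refuses to restore the page unless the magic matches.
 */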
55
56 static bool kho_enable __ro_after_init = IS_ENABLED(CONFIG_KEXEC_HANDOVER_ENABLE_DEFAULT);
57
58 bool kho_is_enabled(void)
59 {
60 return kho_enable;
61 }
62 EXPORT_SYMBOL_GPL(kho_is_enabled);
63
64 static int __init kho_parse_enable(char *p)
65 {
66 return kstrtobool(p, &kho_enable);
67 }
68 early_param("kho", kho_parse_enable);
69
70 /*
71 * Keep track of memory that is to be preserved across KHO.
72 *
73 * The serializing side uses two levels of xarrays to manage chunks of per-order
74 * PAGE_SIZE byte bitmaps. For instance if PAGE_SIZE = 4096, the entire 1G order
75 * of an 8TB system would fit inside a single 4096 byte bitmap. For order 0
76 * allocations each bitmap will cover 128M of address space. Thus, for 16G of
77 * memory at most 512K of bitmap memory will be needed for order 0.
78 *
79 * This approach is fully incremental: as the serialization progresses, folios
80 * can continue to be aggregated to the tracker. The final step, immediately
81 * prior to kexec, serializes the xarray information into a linked list for the
82 * successor kernel to parse.
83 */
84
85 #define PRESERVE_BITS (PAGE_SIZE * 8)
86
87 struct kho_mem_phys_bits {
88 DECLARE_BITMAP(preserve, PRESERVE_BITS);
89 };
90
91 static_assert(sizeof(struct kho_mem_phys_bits) == PAGE_SIZE);
92
93 struct kho_mem_phys {
94 /*
95 * Points to kho_mem_phys_bits, a sparse array of bitmaps. Each bit
96 * represents a page of size PAGE_SIZE << order.
97 */
98 struct xarray phys_bits;
99 };
100
101 struct kho_mem_track {
102 /* Points to kho_mem_phys, each order gets its own bitmap tree */
103 struct xarray orders;
104 };
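/*
 * Worked example of the two-level lookup, assuming PAGE_SIZE == 4096 (so
 * PRESERVE_BITS == 32768) and an order-0 page at pfn 0x12345 (74565):
 *
 *	physxa = xa_load(&track->orders, 0);
 *	bits   = xa_load(&physxa->phys_bits, 74565 / PRESERVE_BITS);  index 2
 *	set_bit(74565 % PRESERVE_BITS, bits->preserve);               bit 9029
 *
 * Each order-0 bitmap page therefore covers 32768 * 4 KiB = 128 MiB of
 * physical address space, matching the estimate in the comment above.
 */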
105
106 struct khoser_mem_chunk;
107
108 struct kho_out {
109 void *fdt;
110 bool finalized;
111 struct mutex lock; /* protects KHO FDT finalization */
112
113 struct kho_mem_track track;
114 struct kho_debugfs dbg;
115 };
116
117 static struct kho_out kho_out = {
118 .lock = __MUTEX_INITIALIZER(kho_out.lock),
119 .track = {
120 .orders = XARRAY_INIT(kho_out.track.orders, 0),
121 },
122 .finalized = false,
123 };
124
125 static void *xa_load_or_alloc(struct xarray *xa, unsigned long index)
126 {
127 void *res = xa_load(xa, index);
128
129 if (res)
130 return res;
131
132 void *elm __free(free_page) = (void *)get_zeroed_page(GFP_KERNEL);
133
134 if (!elm)
135 return ERR_PTR(-ENOMEM);
136
137 if (WARN_ON(kho_scratch_overlap(virt_to_phys(elm), PAGE_SIZE)))
138 return ERR_PTR(-EINVAL);
139
140 res = xa_cmpxchg(xa, index, NULL, elm, GFP_KERNEL);
141 if (xa_is_err(res))
142 return ERR_PTR(xa_err(res));
143 else if (res)
144 return res;
145
146 return no_free_ptr(elm);
147 }
148
149 static void __kho_unpreserve_order(struct kho_mem_track *track, unsigned long pfn,
150 unsigned int order)
151 {
152 struct kho_mem_phys_bits *bits;
153 struct kho_mem_phys *physxa;
154 const unsigned long pfn_high = pfn >> order;
155
156 physxa = xa_load(&track->orders, order);
157 if (WARN_ON_ONCE(!physxa))
158 return;
159
160 bits = xa_load(&physxa->phys_bits, pfn_high / PRESERVE_BITS);
161 if (WARN_ON_ONCE(!bits))
162 return;
163
164 clear_bit(pfn_high % PRESERVE_BITS, bits->preserve);
165 }
166
167 static void __kho_unpreserve(struct kho_mem_track *track, unsigned long pfn,
168 unsigned long end_pfn)
169 {
170 unsigned int order;
171
172 while (pfn < end_pfn) {
173 order = min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
174
175 __kho_unpreserve_order(track, pfn, order);
176
177 pfn += 1 << order;
178 }
179 }
180
181 static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
182 unsigned int order)
183 {
184 struct kho_mem_phys_bits *bits;
185 struct kho_mem_phys *physxa, *new_physxa;
186 const unsigned long pfn_high = pfn >> order;
187
188 might_sleep();
189 physxa = xa_load(&track->orders, order);
190 if (!physxa) {
191 int err;
192
193 new_physxa = kzalloc(sizeof(*physxa), GFP_KERNEL);
194 if (!new_physxa)
195 return -ENOMEM;
196
197 xa_init(&new_physxa->phys_bits);
198 physxa = xa_cmpxchg(&track->orders, order, NULL, new_physxa,
199 GFP_KERNEL);
200
201 err = xa_err(physxa);
202 if (err || physxa) {
203 xa_destroy(&new_physxa->phys_bits);
204 kfree(new_physxa);
205
206 if (err)
207 return err;
208 } else {
209 physxa = new_physxa;
210 }
211 }
212
213 bits = xa_load_or_alloc(&physxa->phys_bits, pfn_high / PRESERVE_BITS);
214 if (IS_ERR(bits))
215 return PTR_ERR(bits);
216
217 set_bit(pfn_high % PRESERVE_BITS, bits->preserve);
218
219 return 0;
220 }
221
222 static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
223 {
224 struct page *page = pfn_to_online_page(PHYS_PFN(phys));
225 unsigned int nr_pages, ref_cnt;
226 union kho_page_info info;
227
228 if (!page)
229 return NULL;
230
231 info.page_private = page->private;
232 /*
233 * deserialize_bitmap() only sets the magic on the head page. This magic
234 * check also implicitly makes sure phys is order-aligned since for
235 * non-order-aligned phys addresses, magic will never be set.
236 */
237 if (WARN_ON_ONCE(info.magic != KHO_PAGE_MAGIC || info.order > MAX_PAGE_ORDER))
238 return NULL;
239 nr_pages = (1 << info.order);
240
241 /* Clear private to make sure later restores on this page error out. */
242 page->private = 0;
243 /* Head page gets refcount of 1. */
244 set_page_count(page, 1);
245
246 /*
247 * For higher order folios, tail pages get a page count of zero.
248 * For physically contiguous order-0 pages, every page gets a page
249 * count of 1.
250 */
251 ref_cnt = is_folio ? 0 : 1;
252 for (unsigned int i = 1; i < nr_pages; i++)
253 set_page_count(page + i, ref_cnt);
254
255 if (is_folio && info.order)
256 prep_compound_page(page, info.order);
257
258 adjust_managed_page_count(page, nr_pages);
259 return page;
260 }
261
262 /**
263 * kho_restore_folio - recreates the folio from the preserved memory.
264 * @phys: physical address of the folio.
265 *
266 * Return: pointer to the struct folio on success, NULL on failure.
267 */
268 struct folio *kho_restore_folio(phys_addr_t phys)
269 {
270 struct page *page = kho_restore_page(phys, true);
271
272 return page ? page_folio(page) : NULL;
273 }
274 EXPORT_SYMBOL_GPL(kho_restore_folio);
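/*
 * Illustrative sketch of a successor-kernel caller, assuming it recovered
 * the folio's physical address (state_phys, hypothetical) from its own
 * sub-FDT:
 *
 *	struct folio *folio = kho_restore_folio(state_phys);
 *
 *	if (!folio)
 *		return -ENOENT;
 *	data = folio_address(folio);
 *	...
 *	folio_put(folio);	release once the contents are consumed
 */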
275
276 /**
277 * kho_restore_pages - restore list of contiguous order 0 pages.
278 * @phys: physical address of the first page.
279 * @nr_pages: number of pages.
280 *
281 * Restore a contiguous list of order 0 pages that was preserved with
282 * kho_preserve_pages().
283 *
284 * Return: pointer to the first struct page on success, NULL on failure.
285 */
286 struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages)
287 {
288 const unsigned long start_pfn = PHYS_PFN(phys);
289 const unsigned long end_pfn = start_pfn + nr_pages;
290 unsigned long pfn = start_pfn;
291
292 while (pfn < end_pfn) {
293 const unsigned int order =
294 min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
295 struct page *page = kho_restore_page(PFN_PHYS(pfn), false);
296
297 if (!page)
298 return NULL;
299 pfn += 1 << order;
300 }
301
302 return pfn_to_page(start_pfn);
303 }
304 EXPORT_SYMBOL_GPL(kho_restore_pages);
305
306 /* Serialize and deserialize struct kho_mem_phys across kexec
307 *
308 * Record all the bitmaps in a linked list of pages for the next kernel to
309 * process. Each chunk holds bitmaps of the same order and each block of bitmaps
310 * starts at a given physical address. This allows the bitmaps to be sparse. The
311 * xarray is used to store them in a tree while building up the data structure,
312 * but the KHO successor kernel only needs to process them once in order.
313 *
314 * All of this memory is normal kmalloc() memory and is not marked for
315 * preservation. The successor kernel will remain isolated to the scratch space
316 * until it completes processing this list. Once processed, all the memory
317 * storing these ranges will be marked as free.
318 */
319
320 struct khoser_mem_bitmap_ptr {
321 phys_addr_t phys_start;
322 DECLARE_KHOSER_PTR(bitmap, struct kho_mem_phys_bits *);
323 };
324
325 struct khoser_mem_chunk_hdr {
326 DECLARE_KHOSER_PTR(next, struct khoser_mem_chunk *);
327 unsigned int order;
328 unsigned int num_elms;
329 };
330
331 #define KHOSER_BITMAP_SIZE \
332 ((PAGE_SIZE - sizeof(struct khoser_mem_chunk_hdr)) / \
333 sizeof(struct khoser_mem_bitmap_ptr))
334
335 struct khoser_mem_chunk {
336 struct khoser_mem_chunk_hdr hdr;
337 struct khoser_mem_bitmap_ptr bitmaps[KHOSER_BITMAP_SIZE];
338 };
339
340 static_assert(sizeof(struct khoser_mem_chunk) == PAGE_SIZE);
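/*
 * Worked example of the chunk encoding, continuing the order-0 example
 * above with PAGE_SIZE == 4096: the bitmap stored at xarray index 2 is
 * recorded as
 *
 *	elm->phys_start = (2 * PRESERVE_BITS) << (0 + PAGE_SHIFT)
 *			= 65536 << 12 = 0x10000000 (256 MiB)
 *
 * and on the restore side deserialize_bitmap() turns bit 9029 of that
 * bitmap back into 0x10000000 + (9029 << 12) = 0x12345000, i.e. exactly
 * pfn 0x12345 that was preserved in the first place.
 */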
341
342 static struct khoser_mem_chunk *new_chunk(struct khoser_mem_chunk *cur_chunk,
343 unsigned long order)
344 {
345 struct khoser_mem_chunk *chunk __free(free_page) = NULL;
346
347 chunk = (void *)get_zeroed_page(GFP_KERNEL);
348 if (!chunk)
349 return ERR_PTR(-ENOMEM);
350
351 if (WARN_ON(kho_scratch_overlap(virt_to_phys(chunk), PAGE_SIZE)))
352 return ERR_PTR(-EINVAL);
353
354 chunk->hdr.order = order;
355 if (cur_chunk)
356 KHOSER_STORE_PTR(cur_chunk->hdr.next, chunk);
357 return no_free_ptr(chunk);
358 }
359
360 static void kho_mem_ser_free(struct khoser_mem_chunk *first_chunk)
361 {
362 struct khoser_mem_chunk *chunk = first_chunk;
363
364 while (chunk) {
365 struct khoser_mem_chunk *tmp = chunk;
366
367 chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
368 free_page((unsigned long)tmp);
369 }
370 }
371
372 /*
373 * Update memory map property, if old one is found discard it via
374 * kho_mem_ser_free().
375 */
376 static void kho_update_memory_map(struct khoser_mem_chunk *first_chunk)
377 {
378 void *ptr;
379 u64 phys;
380
381 ptr = fdt_getprop_w(kho_out.fdt, 0, PROP_PRESERVED_MEMORY_MAP, NULL);
382
383 /* Check and discard previous memory map */
384 phys = get_unaligned((u64 *)ptr);
385 if (phys)
386 kho_mem_ser_free((struct khoser_mem_chunk *)phys_to_virt(phys));
387
388 /* Update with the new value */
389 phys = first_chunk ? (u64)virt_to_phys(first_chunk) : 0;
390 put_unaligned(phys, (u64 *)ptr);
391 }
392
393 static int kho_mem_serialize(struct kho_out *kho_out)
394 {
395 struct khoser_mem_chunk *first_chunk = NULL;
396 struct khoser_mem_chunk *chunk = NULL;
397 struct kho_mem_phys *physxa;
398 unsigned long order;
399 int err = -ENOMEM;
400
401 xa_for_each(&kho_out->track.orders, order, physxa) {
402 struct kho_mem_phys_bits *bits;
403 unsigned long phys;
404
405 chunk = new_chunk(chunk, order);
406 if (IS_ERR(chunk)) {
407 err = PTR_ERR(chunk);
408 goto err_free;
409 }
410
411 if (!first_chunk)
412 first_chunk = chunk;
413
414 xa_for_each(&physxa->phys_bits, phys, bits) {
415 struct khoser_mem_bitmap_ptr *elm;
416
417 if (chunk->hdr.num_elms == ARRAY_SIZE(chunk->bitmaps)) {
418 chunk = new_chunk(chunk, order);
419 if (IS_ERR(chunk)) {
420 err = PTR_ERR(chunk);
421 goto err_free;
422 }
423 }
424
425 elm = &chunk->bitmaps[chunk->hdr.num_elms];
426 chunk->hdr.num_elms++;
427 elm->phys_start = (phys * PRESERVE_BITS)
428 << (order + PAGE_SHIFT);
429 KHOSER_STORE_PTR(elm->bitmap, bits);
430 }
431 }
432
433 kho_update_memory_map(first_chunk);
434
435 return 0;
436
437 err_free:
438 kho_mem_ser_free(first_chunk);
439 return err;
440 }
441
442 static void __init deserialize_bitmap(unsigned int order,
443 struct khoser_mem_bitmap_ptr *elm)
444 {
445 struct kho_mem_phys_bits *bitmap = KHOSER_LOAD_PTR(elm->bitmap);
446 unsigned long bit;
447
448 for_each_set_bit(bit, bitmap->preserve, PRESERVE_BITS) {
449 int sz = 1 << (order + PAGE_SHIFT);
450 phys_addr_t phys =
451 elm->phys_start + (bit << (order + PAGE_SHIFT));
452 struct page *page = phys_to_page(phys);
453 union kho_page_info info;
454
455 memblock_reserve(phys, sz);
456 memblock_reserved_mark_noinit(phys, sz);
457 info.magic = KHO_PAGE_MAGIC;
458 info.order = order;
459 page->private = info.page_private;
460 }
461 }
462
463 /* Returns physical address of the preserved memory map from FDT */
464 static phys_addr_t __init kho_get_mem_map_phys(const void *fdt)
465 {
466 const void *mem_ptr;
467 int len;
468
469 mem_ptr = fdt_getprop(fdt, 0, PROP_PRESERVED_MEMORY_MAP, &len);
470 if (!mem_ptr || len != sizeof(u64)) {
471 pr_err("failed to get preserved memory bitmaps\n");
472 return 0;
473 }
474
475 return get_unaligned((const u64 *)mem_ptr);
476 }
477
478 static void __init kho_mem_deserialize(struct khoser_mem_chunk *chunk)
479 {
480 while (chunk) {
481 unsigned int i;
482
483 for (i = 0; i != chunk->hdr.num_elms; i++)
484 deserialize_bitmap(chunk->hdr.order,
485 &chunk->bitmaps[i]);
486 chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
487 }
488 }
489
490 /*
491 * With KHO enabled, memory can become fragmented because KHO regions may
492 * be anywhere in physical address space. The scratch regions give us
493 * safe zones that will never see KHO allocations. This is where we can
494 * later safely load new kexec images, and where we place early
495 * allocations that happen before the page allocator is
496 * initialized.
497 */
498 struct kho_scratch *kho_scratch;
499 unsigned int kho_scratch_cnt;
500
501 /*
502 * The scratch areas are scaled by default as percent of memory allocated from
503 * memblock. A user can override the scale with command line parameter:
504 *
505 * kho_scratch=N%
506 *
507 * It is also possible to explicitly define sizes for the lowmem, global and
508 * per-node scratch areas:
509 *
510 * kho_scratch=l[KMG],n[KMG],m[KMG]
511 *
512 * The explicit size definition takes precedence over scale definition.
513 */
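/*
 * Example command lines (the sizes are arbitrary illustrations):
 *
 *	kho_scratch=150%		scale every scratch area to 150% of
 *					the corresponding memblock-reserved
 *					kernel memory
 *	kho_scratch=256M,2G,512M	256 MiB lowmem, 2 GiB global and
 *					512 MiB per-node scratch areas
 */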
514 static unsigned int scratch_scale __initdata = 200;
515 static phys_addr_t scratch_size_global __initdata;
516 static phys_addr_t scratch_size_pernode __initdata;
517 static phys_addr_t scratch_size_lowmem __initdata;
518
519 static int __init kho_parse_scratch_size(char *p)
520 {
521 size_t len;
522 unsigned long sizes[3];
523 size_t total_size = 0;
524 int i;
525
526 if (!p)
527 return -EINVAL;
528
529 len = strlen(p);
530 if (!len)
531 return -EINVAL;
532
533 /* parse nn% */
534 if (p[len - 1] == '%') {
535 /* unsigned int max is 4,294,967,295, 10 chars */
536 char s_scale[11] = {};
537 int ret = 0;
538
539 if (len > ARRAY_SIZE(s_scale))
540 return -EINVAL;
541
542 memcpy(s_scale, p, len - 1);
543 ret = kstrtouint(s_scale, 10, &scratch_scale);
544 if (!ret)
545 pr_notice("scratch scale is %d%%\n", scratch_scale);
546 return ret;
547 }
548
549 /* parse ll[KMG],mm[KMG],nn[KMG] */
550 for (i = 0; i < ARRAY_SIZE(sizes); i++) {
551 char *endp = p;
552
553 if (i > 0) {
554 if (*p != ',')
555 return -EINVAL;
556 p += 1;
557 }
558
559 sizes[i] = memparse(p, &endp);
560 if (endp == p)
561 return -EINVAL;
562 p = endp;
563 total_size += sizes[i];
564 }
565
566 if (!total_size)
567 return -EINVAL;
568
569 /* The string should be fully consumed by now. */
570 if (*p)
571 return -EINVAL;
572
573 scratch_size_lowmem = sizes[0];
574 scratch_size_global = sizes[1];
575 scratch_size_pernode = sizes[2];
576 scratch_scale = 0;
577
578 pr_notice("scratch areas: lowmem: %lluMiB global: %lluMiB pernode: %lldMiB\n",
579 (u64)(scratch_size_lowmem >> 20),
580 (u64)(scratch_size_global >> 20),
581 (u64)(scratch_size_pernode >> 20));
582
583 return 0;
584 }
585 early_param("kho_scratch", kho_parse_scratch_size);
586
587 static void __init scratch_size_update(void)
588 {
589 phys_addr_t size;
590
591 if (!scratch_scale)
592 return;
593
594 size = memblock_reserved_kern_size(ARCH_LOW_ADDRESS_LIMIT,
595 NUMA_NO_NODE);
596 size = size * scratch_scale / 100;
597 scratch_size_lowmem = round_up(size, CMA_MIN_ALIGNMENT_BYTES);
598
599 size = memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE,
600 NUMA_NO_NODE);
601 size = size * scratch_scale / 100 - scratch_size_lowmem;
602 scratch_size_global = round_up(size, CMA_MIN_ALIGNMENT_BYTES);
603 }
604
605 static phys_addr_t __init scratch_size_node(int nid)
606 {
607 phys_addr_t size;
608
609 if (scratch_scale) {
610 size = memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE,
611 nid);
612 size = size * scratch_scale / 100;
613 } else {
614 size = scratch_size_pernode;
615 }
616
617 return round_up(size, CMA_MIN_ALIGNMENT_BYTES);
618 }
619
620 /**
621 * kho_reserve_scratch - Reserve a contiguous chunk of memory for kexec
622 *
623 * With KHO we can preserve arbitrary pages in the system. To ensure we still
624 * have a large contiguous region of memory when we search the physical address
625 * space for target memory, let's make sure we always have a large CMA region
626 * active. This CMA region will only be used for movable pages which are not a
627 * problem for us during KHO because we can just move them somewhere else.
628 */
629 static void __init kho_reserve_scratch(void)
630 {
631 phys_addr_t addr, size;
632 int nid, i = 0;
633
634 if (!kho_enable)
635 return;
636
637 scratch_size_update();
638
639 /* FIXME: deal with node hot-plug/remove */
640 kho_scratch_cnt = num_online_nodes() + 2;
641 size = kho_scratch_cnt * sizeof(*kho_scratch);
642 kho_scratch = memblock_alloc(size, PAGE_SIZE);
643 if (!kho_scratch)
644 goto err_disable_kho;
645
646 /*
647 * reserve scratch area in low memory for lowmem allocations in the
648 * next kernel
649 */
650 size = scratch_size_lowmem;
651 addr = memblock_phys_alloc_range(size, CMA_MIN_ALIGNMENT_BYTES, 0,
652 ARCH_LOW_ADDRESS_LIMIT);
653 if (!addr)
654 goto err_free_scratch_desc;
655
656 kho_scratch[i].addr = addr;
657 kho_scratch[i].size = size;
658 i++;
659
660 /* reserve large contiguous area for allocations without nid */
661 size = scratch_size_global;
662 addr = memblock_phys_alloc(size, CMA_MIN_ALIGNMENT_BYTES);
663 if (!addr)
664 goto err_free_scratch_areas;
665
666 kho_scratch[i].addr = addr;
667 kho_scratch[i].size = size;
668 i++;
669
670 for_each_online_node(nid) {
671 size = scratch_size_node(nid);
672 addr = memblock_alloc_range_nid(size, CMA_MIN_ALIGNMENT_BYTES,
673 0, MEMBLOCK_ALLOC_ACCESSIBLE,
674 nid, true);
675 if (!addr)
676 goto err_free_scratch_areas;
677
678 kho_scratch[i].addr = addr;
679 kho_scratch[i].size = size;
680 i++;
681 }
682
683 return;
684
685 err_free_scratch_areas:
686 for (i--; i >= 0; i--)
687 memblock_phys_free(kho_scratch[i].addr, kho_scratch[i].size);
688 err_free_scratch_desc:
689 memblock_free(kho_scratch, kho_scratch_cnt * sizeof(*kho_scratch));
690 err_disable_kho:
691 pr_warn("Failed to reserve scratch area, disabling kexec handover\n");
692 kho_enable = false;
693 }
694
695 /**
696 * kho_add_subtree - record the physical address of a sub FDT in KHO root tree.
697 * @name: name of the sub tree.
698 * @fdt: the sub tree blob.
699 *
700 * Creates a new child node named @name in KHO root FDT and records
701 * the physical address of @fdt. The pages of @fdt must also be preserved
702 * by KHO for the new kernel to retrieve it after kexec.
703 *
704 * A debugfs blob entry is also created at
705 * ``/sys/kernel/debug/kho/out/sub_fdts/@name`` when the kernel is configured
706 * with CONFIG_KEXEC_HANDOVER_DEBUGFS.
707 *
708 * Return: 0 on success, error code on failure
709 */
710 int kho_add_subtree(const char *name, void *fdt)
711 {
712 phys_addr_t phys = virt_to_phys(fdt);
713 void *root_fdt = kho_out.fdt;
714 int err = -ENOMEM;
715 int off, fdt_err;
716
717 guard(mutex)(&kho_out.lock);
718
719 fdt_err = fdt_open_into(root_fdt, root_fdt, PAGE_SIZE);
720 if (fdt_err < 0)
721 return err;
722
723 off = fdt_add_subnode(root_fdt, 0, name);
724 if (off < 0) {
725 if (off == -FDT_ERR_EXISTS)
726 err = -EEXIST;
727 goto out_pack;
728 }
729
730 err = fdt_setprop(root_fdt, off, PROP_SUB_FDT, &phys, sizeof(phys));
731 if (err < 0)
732 goto out_pack;
733
734 WARN_ON_ONCE(kho_debugfs_fdt_add(&kho_out.dbg, name, fdt, false));
735
736 out_pack:
737 fdt_pack(root_fdt);
738
739 return err;
740 }
741 EXPORT_SYMBOL_GPL(kho_add_subtree);
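/*
 * Illustrative sketch of a producer registering its own sub-FDT; the node
 * name and property are hypothetical. Allocating the blob with
 * kho_alloc_preserve() keeps its page preserved for the next kernel:
 *
 *	void *fdt = kho_alloc_preserve(PAGE_SIZE);
 *
 *	if (IS_ERR(fdt))
 *		return PTR_ERR(fdt);
 *	err = fdt_create(fdt, PAGE_SIZE);
 *	err |= fdt_finish_reservemap(fdt);
 *	err |= fdt_begin_node(fdt, "");
 *	err |= fdt_property(fdt, "state", &state_phys, sizeof(state_phys));
 *	err |= fdt_end_node(fdt);
 *	err |= fdt_finish(fdt);
 *	if (!err)
 *		err = kho_add_subtree("example-driver", fdt);
 */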
742
743 void kho_remove_subtree(void *fdt)
744 {
745 phys_addr_t target_phys = virt_to_phys(fdt);
746 void *root_fdt = kho_out.fdt;
747 int off;
748 int err;
749
750 guard(mutex)(&kho_out.lock);
751
752 err = fdt_open_into(root_fdt, root_fdt, PAGE_SIZE);
753 if (err < 0)
754 return;
755
756 for (off = fdt_first_subnode(root_fdt, 0); off >= 0;
757 off = fdt_next_subnode(root_fdt, off)) {
758 const u64 *val;
759 int len;
760
761 val = fdt_getprop(root_fdt, off, PROP_SUB_FDT, &len);
762 if (!val || len != sizeof(phys_addr_t))
763 continue;
764
765 if ((phys_addr_t)*val == target_phys) {
766 fdt_del_node(root_fdt, off);
767 kho_debugfs_fdt_remove(&kho_out.dbg, fdt);
768 break;
769 }
770 }
771
772 fdt_pack(root_fdt);
773 }
774 EXPORT_SYMBOL_GPL(kho_remove_subtree);
775
776 /**
777 * kho_preserve_folio - preserve a folio across kexec.
778 * @folio: folio to preserve.
779 *
780 * Instructs KHO to preserve the whole folio across kexec. The order
781 * will be preserved as well.
782 *
783 * Return: 0 on success, error code on failure
784 */
785 int kho_preserve_folio(struct folio *folio)
786 {
787 const unsigned long pfn = folio_pfn(folio);
788 const unsigned int order = folio_order(folio);
789 struct kho_mem_track *track = &kho_out.track;
790
791 if (WARN_ON(kho_scratch_overlap(pfn << PAGE_SHIFT, PAGE_SIZE << order)))
792 return -EINVAL;
793
794 return __kho_preserve_order(track, pfn, order);
795 }
796 EXPORT_SYMBOL_GPL(kho_preserve_folio);
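/*
 * Illustrative sketch of the preserve side (hypothetical caller). The
 * folio's physical address still has to reach the successor kernel, for
 * example as a property in the caller's sub-FDT (see kho_add_subtree()):
 *
 *	struct folio *folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 2);
 *	int err;
 *
 *	if (!folio)
 *		return -ENOMEM;
 *	err = kho_preserve_folio(folio);
 *	if (err) {
 *		folio_put(folio);
 *		return err;
 *	}
 *	state_phys = PFN_PHYS(folio_pfn(folio));	record in a sub-FDT
 */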
797
798 /**
799 * kho_unpreserve_folio - unpreserve a folio.
800 * @folio: folio to unpreserve.
801 *
802 * Instructs KHO to unpreserve a folio that was preserved by
803 * kho_preserve_folio() before. The provided @folio (pfn and order)
804 * must exactly match a previously preserved folio.
805 */
806 void kho_unpreserve_folio(struct folio *folio)
807 {
808 const unsigned long pfn = folio_pfn(folio);
809 const unsigned int order = folio_order(folio);
810 struct kho_mem_track *track = &kho_out.track;
811
812 __kho_unpreserve_order(track, pfn, order);
813 }
814 EXPORT_SYMBOL_GPL(kho_unpreserve_folio);
815
816 /**
817 * kho_preserve_pages - preserve contiguous pages across kexec
818 * @page: first page in the list.
819 * @nr_pages: number of pages.
820 *
821 * Preserve a contiguous list of order 0 pages. Must be restored using
822 * kho_restore_pages() to ensure the pages are restored properly as order 0.
823 *
824 * Return: 0 on success, error code on failure
825 */
826 int kho_preserve_pages(struct page *page, unsigned int nr_pages)
827 {
828 struct kho_mem_track *track = &kho_out.track;
829 const unsigned long start_pfn = page_to_pfn(page);
830 const unsigned long end_pfn = start_pfn + nr_pages;
831 unsigned long pfn = start_pfn;
832 unsigned long failed_pfn = 0;
833 int err = 0;
834
835 if (WARN_ON(kho_scratch_overlap(start_pfn << PAGE_SHIFT,
836 nr_pages << PAGE_SHIFT))) {
837 return -EINVAL;
838 }
839
840 while (pfn < end_pfn) {
841 const unsigned int order =
842 min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
843
844 err = __kho_preserve_order(track, pfn, order);
845 if (err) {
846 failed_pfn = pfn;
847 break;
848 }
849
850 pfn += 1 << order;
851 }
852
853 if (err)
854 __kho_unpreserve(track, start_pfn, failed_pfn);
855
856 return err;
857 }
858 EXPORT_SYMBOL_GPL(kho_preserve_pages);
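/*
 * Illustrative preserve/restore pairing for a physically contiguous
 * buffer (hypothetical caller, nr_pages chosen arbitrarily):
 *
 *	old kernel:
 *		err = kho_preserve_pages(virt_to_page(buf), 16);
 *
 *	new kernel, with buf_phys recovered from a sub-FDT:
 *		struct page *first = kho_restore_pages(buf_phys, 16);
 *
 *		if (first)
 *			buf = page_address(first);
 *
 * Using the same start and nr_pages on both sides ensures every page
 * comes back as order 0 with a reference count of one.
 */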
859
860 /**
861 * kho_unpreserve_pages - unpreserve contiguous pages.
862 * @page: first page in the list.
863 * @nr_pages: number of pages.
864 *
865 * Instructs KHO to unpreserve @nr_pages contiguous pages starting from @page.
866 * This must be called with the same @page and @nr_pages as the corresponding
867 * kho_preserve_pages() call. Unpreserving arbitrary sub-ranges of larger
868 * preserved blocks is not supported.
869 */
870 void kho_unpreserve_pages(struct page *page, unsigned int nr_pages)
871 {
872 struct kho_mem_track *track = &kho_out.track;
873 const unsigned long start_pfn = page_to_pfn(page);
874 const unsigned long end_pfn = start_pfn + nr_pages;
875
876 __kho_unpreserve(track, start_pfn, end_pfn);
877 }
878 EXPORT_SYMBOL_GPL(kho_unpreserve_pages);
879
880 struct kho_vmalloc_hdr {
881 DECLARE_KHOSER_PTR(next, struct kho_vmalloc_chunk *);
882 };
883
884 #define KHO_VMALLOC_SIZE \
885 ((PAGE_SIZE - sizeof(struct kho_vmalloc_hdr)) / \
886 sizeof(phys_addr_t))
887
888 struct kho_vmalloc_chunk {
889 struct kho_vmalloc_hdr hdr;
890 phys_addr_t phys[KHO_VMALLOC_SIZE];
891 };
892
893 static_assert(sizeof(struct kho_vmalloc_chunk) == PAGE_SIZE);
894
895 /* vmalloc flags KHO supports */
896 #define KHO_VMALLOC_SUPPORTED_FLAGS (VM_ALLOC | VM_ALLOW_HUGE_VMAP)
897
898 /* KHO internal flags for vmalloc preservations */
899 #define KHO_VMALLOC_ALLOC 0x0001
900 #define KHO_VMALLOC_HUGE_VMAP 0x0002
901
902 static unsigned short vmalloc_flags_to_kho(unsigned int vm_flags)
903 {
904 unsigned short kho_flags = 0;
905
906 if (vm_flags & VM_ALLOC)
907 kho_flags |= KHO_VMALLOC_ALLOC;
908 if (vm_flags & VM_ALLOW_HUGE_VMAP)
909 kho_flags |= KHO_VMALLOC_HUGE_VMAP;
910
911 return kho_flags;
912 }
913
914 static unsigned int kho_flags_to_vmalloc(unsigned short kho_flags)
915 {
916 unsigned int vm_flags = 0;
917
918 if (kho_flags & KHO_VMALLOC_ALLOC)
919 vm_flags |= VM_ALLOC;
920 if (kho_flags & KHO_VMALLOC_HUGE_VMAP)
921 vm_flags |= VM_ALLOW_HUGE_VMAP;
922
923 return vm_flags;
924 }
925
926 static struct kho_vmalloc_chunk *new_vmalloc_chunk(struct kho_vmalloc_chunk *cur)
927 {
928 struct kho_vmalloc_chunk *chunk;
929 int err;
930
931 chunk = (struct kho_vmalloc_chunk *)get_zeroed_page(GFP_KERNEL);
932 if (!chunk)
933 return NULL;
934
935 err = kho_preserve_pages(virt_to_page(chunk), 1);
936 if (err)
937 goto err_free;
938 if (cur)
939 KHOSER_STORE_PTR(cur->hdr.next, chunk);
940 return chunk;
941
942 err_free:
943 free_page((unsigned long)chunk);
944 return NULL;
945 }
946
947 static void kho_vmalloc_unpreserve_chunk(struct kho_vmalloc_chunk *chunk,
948 unsigned short order)
949 {
950 struct kho_mem_track *track = &kho_out.track;
951 unsigned long pfn = PHYS_PFN(virt_to_phys(chunk));
952
953 __kho_unpreserve(track, pfn, pfn + 1);
954
955 for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
956 pfn = PHYS_PFN(chunk->phys[i]);
957 __kho_unpreserve(track, pfn, pfn + (1 << order));
958 }
959 }
960
961 /**
962 * kho_preserve_vmalloc - preserve memory allocated with vmalloc() across kexec
963 * @ptr: pointer to the area in vmalloc address space
964 * @preservation: placeholder for preservation metadata
965 *
966 * Instructs KHO to preserve the area in vmalloc address space at @ptr. The
967 * physical pages mapped at @ptr will be preserved and on successful return
968 * @preservation will hold the physical address of a structure that describes
969 * the preservation.
970 *
971 * NOTE: The memory allocated with vmalloc_node() variants cannot be reliably
972 * restored on the same node.
973 *
974 * Return: 0 on success, error code on failure
975 */
976 int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation)
977 {
978 struct kho_vmalloc_chunk *chunk;
979 struct vm_struct *vm = find_vm_area(ptr);
980 unsigned int order, flags, nr_contig_pages;
981 unsigned int idx = 0;
982 int err;
983
984 if (!vm)
985 return -EINVAL;
986
987 if (vm->flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
988 return -EOPNOTSUPP;
989
990 flags = vmalloc_flags_to_kho(vm->flags);
991 order = get_vm_area_page_order(vm);
992
993 chunk = new_vmalloc_chunk(NULL);
994 if (!chunk)
995 return -ENOMEM;
996 KHOSER_STORE_PTR(preservation->first, chunk);
997
998 nr_contig_pages = (1 << order);
999 for (int i = 0; i < vm->nr_pages; i += nr_contig_pages) {
1000 phys_addr_t phys = page_to_phys(vm->pages[i]);
1001
1002 err = kho_preserve_pages(vm->pages[i], nr_contig_pages);
1003 if (err)
1004 goto err_free;
1005
1006 chunk->phys[idx++] = phys;
1007 if (idx == ARRAY_SIZE(chunk->phys)) {
1008 chunk = new_vmalloc_chunk(chunk);
1009 if (!chunk)
1010 goto err_free;
1011 idx = 0;
1012 }
1013 }
1014
1015 preservation->total_pages = vm->nr_pages;
1016 preservation->flags = flags;
1017 preservation->order = order;
1018
1019 return 0;
1020
1021 err_free:
1022 kho_unpreserve_vmalloc(preservation);
1023 return err;
1024 }
1025 EXPORT_SYMBOL_GPL(kho_preserve_vmalloc);
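/*
 * Illustrative sketch (hypothetical caller). The descriptor only holds
 * physical addresses, so one way to hand it to the successor kernel is to
 * copy it verbatim into the caller's sub-FDT:
 *
 *	struct kho_vmalloc pres;
 *	void *buf = vmalloc(SZ_1M);
 *	int err;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	err = kho_preserve_vmalloc(buf, &pres);
 *	if (err)
 *		return err;
 *	err = fdt_property(fdt, "vmalloc-state", &pres, sizeof(pres));
 */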
1026
1027 /**
1028 * kho_unpreserve_vmalloc - unpreserve memory allocated with vmalloc()
1029 * @preservation: preservation metadata returned by kho_preserve_vmalloc()
1030 *
1031 * Instructs KHO to unpreserve the area in vmalloc address space that was
1032 * previously preserved with kho_preserve_vmalloc().
1033 */
1034 void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation)
1035 {
1036 struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first);
1037
1038 while (chunk) {
1039 struct kho_vmalloc_chunk *tmp = chunk;
1040
1041 kho_vmalloc_unpreserve_chunk(chunk, preservation->order);
1042
1043 chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
1044 free_page((unsigned long)tmp);
1045 }
1046 }
1047 EXPORT_SYMBOL_GPL(kho_unpreserve_vmalloc);
1048
1049 /**
1050 * kho_restore_vmalloc - recreates and populates an area in vmalloc address
1051 * space from the preserved memory.
1052 * @preservation: preservation metadata.
1053 *
1054 * Recreates an area in vmalloc address space and populates it with memory that
1055 * was preserved using kho_preserve_vmalloc().
1056 *
1057 * Return: pointer to the area in the vmalloc address space, NULL on failure.
1058 */
1059 void *kho_restore_vmalloc(const struct kho_vmalloc *preservation)
1060 {
1061 struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first);
1062 unsigned int align, order, shift, vm_flags;
1063 unsigned long total_pages, contig_pages;
1064 unsigned long addr, size;
1065 struct vm_struct *area;
1066 struct page **pages;
1067 unsigned int idx = 0;
1068 int err;
1069
1070 vm_flags = kho_flags_to_vmalloc(preservation->flags);
1071 if (vm_flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
1072 return NULL;
1073
1074 total_pages = preservation->total_pages;
1075 pages = kvmalloc_array(total_pages, sizeof(*pages), GFP_KERNEL);
1076 if (!pages)
1077 return NULL;
1078 order = preservation->order;
1079 contig_pages = (1 << order);
1080 shift = PAGE_SHIFT + order;
1081 align = 1 << shift;
1082
1083 while (chunk) {
1084 struct page *page;
1085
1086 for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
1087 phys_addr_t phys = chunk->phys[i];
1088
1089 if (idx + contig_pages > total_pages)
1090 goto err_free_pages_array;
1091
1092 page = kho_restore_pages(phys, contig_pages);
1093 if (!page)
1094 goto err_free_pages_array;
1095
1096 for (int j = 0; j < contig_pages; j++)
1097 pages[idx++] = page + j;
1098
1099 phys += contig_pages * PAGE_SIZE;
1100 }
1101
1102 page = kho_restore_pages(virt_to_phys(chunk), 1);
1103 if (!page)
1104 goto err_free_pages_array;
1105 chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
1106 __free_page(page);
1107 }
1108
1109 if (idx != total_pages)
1110 goto err_free_pages_array;
1111
1112 area = __get_vm_area_node(total_pages * PAGE_SIZE, align, shift,
1113 vm_flags, VMALLOC_START, VMALLOC_END,
1114 NUMA_NO_NODE, GFP_KERNEL,
1115 __builtin_return_address(0));
1116 if (!area)
1117 goto err_free_pages_array;
1118
1119 addr = (unsigned long)area->addr;
1120 size = get_vm_area_size(area);
1121 err = vmap_pages_range(addr, addr + size, PAGE_KERNEL, pages, shift);
1122 if (err)
1123 goto err_free_vm_area;
1124
1125 area->nr_pages = total_pages;
1126 area->pages = pages;
1127
1128 return area->addr;
1129
1130 err_free_vm_area:
1131 free_vm_area(area);
1132 err_free_pages_array:
1133 kvfree(pages);
1134 return NULL;
1135 }
1136 EXPORT_SYMBOL_GPL(kho_restore_vmalloc);
1137
1138 /**
1139 * kho_alloc_preserve - Allocate, zero, and preserve memory.
1140 * @size: The number of bytes to allocate.
1141 *
1142 * Allocates a physically contiguous block of zeroed pages that is large
1143 * enough to hold @size bytes. The allocated memory is then registered with
1144 * KHO for preservation across a kexec.
1145 *
1146 * Note: The actual allocated size will be rounded up to the nearest
1147 * power-of-two page boundary.
1148 *
1149 * Return: A virtual pointer to the allocated and preserved memory on success,
1150 * or an ERR_PTR() encoded error on failure.
1151 */
1152 void *kho_alloc_preserve(size_t size)
1153 {
1154 struct folio *folio;
1155 int order, ret;
1156
1157 if (!size)
1158 return ERR_PTR(-EINVAL);
1159
1160 order = get_order(size);
1161 if (order > MAX_PAGE_ORDER)
1162 return ERR_PTR(-E2BIG);
1163
1164 folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, order);
1165 if (!folio)
1166 return ERR_PTR(-ENOMEM);
1167
1168 ret = kho_preserve_folio(folio);
1169 if (ret) {
1170 folio_put(folio);
1171 return ERR_PTR(ret);
1172 }
1173
1174 return folio_address(folio);
1175 }
1176 EXPORT_SYMBOL_GPL(kho_alloc_preserve);
1177
1178 /**
1179 * kho_unpreserve_free - Unpreserve and free memory.
1180 * @mem: Pointer to the memory allocated by kho_alloc_preserve().
1181 *
1182 * Unregisters the memory from KHO preservation and frees the underlying
1183 * pages back to the system. This function should be called to clean up
1184 * memory allocated with kho_alloc_preserve().
1185 */
1186 void kho_unpreserve_free(void *mem)
1187 {
1188 struct folio *folio;
1189
1190 if (!mem)
1191 return;
1192
1193 folio = virt_to_folio(mem);
1194 kho_unpreserve_folio(folio);
1195 folio_put(folio);
1196 }
1197 EXPORT_SYMBOL_GPL(kho_unpreserve_free);
1198
1199 /**
1200 * kho_restore_free - Restore and free memory after kexec.
1201 * @mem: Pointer to the memory (in the new kernel's address space)
1202 * that was allocated by the old kernel.
1203 *
1204 * This function is intended to be called in the new kernel (post-kexec)
1205 * to take ownership of and free a memory region that was preserved by the
1206 * old kernel using kho_alloc_preserve().
1207 *
1208 * It first restores the pages from KHO (using their physical address)
1209 * and then frees the pages back to the new kernel's page allocator.
1210 */
1211 void kho_restore_free(void *mem)
1212 {
1213 struct folio *folio;
1214
1215 if (!mem)
1216 return;
1217
1218 folio = kho_restore_folio(__pa(mem));
1219 if (!WARN_ON(!folio))
1220 folio_put(folio);
1221 }
1222 EXPORT_SYMBOL_GPL(kho_restore_free);
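/*
 * Illustrative lifecycle of the three helpers above (hypothetical caller):
 *
 *	old kernel:
 *		state = kho_alloc_preserve(sizeof(*state));	alloc + preserve
 *		...						fill in state
 *		kho_unpreserve_free(state);			only on abort
 *
 *	new kernel, with state_phys recovered from a sub-FDT:
 *		state = phys_to_virt(state_phys);
 *		...						consume state
 *		kho_restore_free(state);			return the pages
 */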
1223
1224 int kho_finalize(void)
1225 {
1226 int ret;
1227
1228 if (!kho_enable)
1229 return -EOPNOTSUPP;
1230
1231 guard(mutex)(&kho_out.lock);
1232 ret = kho_mem_serialize(&kho_out);
1233 if (ret)
1234 return ret;
1235
1236 kho_out.finalized = true;
1237
1238 return 0;
1239 }
1240
1241 bool kho_finalized(void)
1242 {
1243 guard(mutex)(&kho_out.lock);
1244 return kho_out.finalized;
1245 }
1246
1247 struct kho_in {
1248 phys_addr_t fdt_phys;
1249 phys_addr_t scratch_phys;
1250 phys_addr_t mem_map_phys;
1251 struct kho_debugfs dbg;
1252 };
1253
1254 static struct kho_in kho_in = {
1255 };
1256
1257 static const void *kho_get_fdt(void)
1258 {
1259 return kho_in.fdt_phys ? phys_to_virt(kho_in.fdt_phys) : NULL;
1260 }
1261
1262 /**
1263 * is_kho_boot - check if current kernel was booted via KHO-enabled
1264 * kexec
1265 *
1266 * This function checks if the current kernel was loaded through a kexec
1267 * operation with KHO enabled, by verifying that a valid KHO FDT
1268 * was passed.
1269 *
1270 * Note: This function returns reliable results only after
1271 * kho_populate() has been called during early boot. Before that,
1272 * it may return false even if KHO data is present.
1273 *
1274 * Return: true if booted via KHO-enabled kexec, false otherwise
1275 */
1276 bool is_kho_boot(void)
1277 {
1278 return !!kho_get_fdt();
1279 }
1280 EXPORT_SYMBOL_GPL(is_kho_boot);
1281
1282 /**
1283 * kho_retrieve_subtree - retrieve a preserved sub FDT by its name.
1284 * @name: the name of the sub FDT passed to kho_add_subtree().
1285 * @phys: if found, the physical address of the sub FDT is stored in @phys.
1286 *
1287 * Retrieve a preserved sub FDT named @name and store its physical
1288 * address in @phys.
1289 *
1290 * Return: 0 on success, error code on failure
1291 */
1292 int kho_retrieve_subtree(const char *name, phys_addr_t *phys)
1293 {
1294 const void *fdt = kho_get_fdt();
1295 const u64 *val;
1296 int offset, len;
1297
1298 if (!fdt)
1299 return -ENOENT;
1300
1301 if (!phys)
1302 return -EINVAL;
1303
1304 offset = fdt_subnode_offset(fdt, 0, name);
1305 if (offset < 0)
1306 return -ENOENT;
1307
1308 val = fdt_getprop(fdt, offset, PROP_SUB_FDT, &len);
1309 if (!val || len != sizeof(*val))
1310 return -EINVAL;
1311
1312 *phys = (phys_addr_t)*val;
1313
1314 return 0;
1315 }
1316 EXPORT_SYMBOL_GPL(kho_retrieve_subtree);
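/*
 * Illustrative consumer in the successor kernel; the node and property
 * names are hypothetical and must match what the producer stored:
 *
 *	phys_addr_t fdt_phys, state_phys;
 *	const void *fdt;
 *	const u64 *val;
 *	int len;
 *
 *	if (kho_retrieve_subtree("example-driver", &fdt_phys))
 *		return -ENOENT;
 *	fdt = phys_to_virt(fdt_phys);
 *	val = fdt_getprop(fdt, 0, "state", &len);
 *	if (!val || len != sizeof(*val))
 *		return -EINVAL;
 *	state_phys = *val;	then e.g. kho_restore_folio(state_phys)
 */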
1317
1318 static __init int kho_out_fdt_setup(void)
1319 {
1320 void *root = kho_out.fdt;
1321 u64 empty_mem_map = 0;
1322 int err;
1323
1324 err = fdt_create(root, PAGE_SIZE);
1325 err |= fdt_finish_reservemap(root);
1326 err |= fdt_begin_node(root, "");
1327 err |= fdt_property_string(root, "compatible", KHO_FDT_COMPATIBLE);
1328 err |= fdt_property(root, PROP_PRESERVED_MEMORY_MAP, &empty_mem_map,
1329 sizeof(empty_mem_map));
1330 err |= fdt_end_node(root);
1331 err |= fdt_finish(root);
1332
1333 return err;
1334 }
1335
1336 static __init int kho_init(void)
1337 {
1338 const void *fdt = kho_get_fdt();
1339 int err = 0;
1340
1341 if (!kho_enable)
1342 return 0;
1343
1344 kho_out.fdt = kho_alloc_preserve(PAGE_SIZE);
1345 if (IS_ERR(kho_out.fdt)) {
1346 err = PTR_ERR(kho_out.fdt);
1347 goto err_free_scratch;
1348 }
1349
1350 err = kho_debugfs_init();
1351 if (err)
1352 goto err_free_fdt;
1353
1354 err = kho_out_debugfs_init(&kho_out.dbg);
1355 if (err)
1356 goto err_free_fdt;
1357
1358 err = kho_out_fdt_setup();
1359 if (err)
1360 goto err_free_fdt;
1361
1362 if (fdt) {
1363 kho_in_debugfs_init(&kho_in.dbg, fdt);
1364 return 0;
1365 }
1366
1367 for (int i = 0; i < kho_scratch_cnt; i++) {
1368 unsigned long base_pfn = PHYS_PFN(kho_scratch[i].addr);
1369 unsigned long count = kho_scratch[i].size >> PAGE_SHIFT;
1370 unsigned long pfn;
1371
1372 /*
1373 * When debug_pagealloc is enabled, __free_pages() clears the
1374 * corresponding PRESENT bit in the kernel page table.
1375 * Subsequent kmemleak scans of these pages cause the
1376 * non-PRESENT page faults.
1377 * Mark scratch areas with kmemleak_ignore_phys() to exclude
1378 * them from kmemleak scanning.
1379 */
1380 kmemleak_ignore_phys(kho_scratch[i].addr);
1381 for (pfn = base_pfn; pfn < base_pfn + count;
1382 pfn += pageblock_nr_pages)
1383 init_cma_reserved_pageblock(pfn_to_page(pfn));
1384 }
1385
1386 WARN_ON_ONCE(kho_debugfs_fdt_add(&kho_out.dbg, "fdt",
1387 kho_out.fdt, true));
1388
1389 return 0;
1390
1391 err_free_fdt:
1392 kho_unpreserve_free(kho_out.fdt);
1393 err_free_scratch:
1394 kho_out.fdt = NULL;
1395 for (int i = 0; i < kho_scratch_cnt; i++) {
1396 void *start = __va(kho_scratch[i].addr);
1397 void *end = start + kho_scratch[i].size;
1398
1399 free_reserved_area(start, end, -1, "");
1400 }
1401 kho_enable = false;
1402 return err;
1403 }
1404 fs_initcall(kho_init);
1405
1406 static void __init kho_release_scratch(void)
1407 {
1408 phys_addr_t start, end;
1409 u64 i;
1410
1411 memmap_init_kho_scratch_pages();
1412
1413 /*
1414 * Mark scratch mem as CMA before we return it. That way we
1415 * ensure that no kernel allocations happen on it. That means
1416 * we can reuse it as scratch memory again later.
1417 */
1418 __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
1419 MEMBLOCK_KHO_SCRATCH, &start, &end, NULL) {
1420 ulong start_pfn = pageblock_start_pfn(PFN_DOWN(start));
1421 ulong end_pfn = pageblock_align(PFN_UP(end));
1422 ulong pfn;
1423
1424 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages)
1425 init_pageblock_migratetype(pfn_to_page(pfn),
1426 MIGRATE_CMA, false);
1427 }
1428 }
1429
1430 void __init kho_memory_init(void)
1431 {
1432 if (kho_in.mem_map_phys) {
1433 kho_scratch = phys_to_virt(kho_in.scratch_phys);
1434 kho_release_scratch();
1435 kho_mem_deserialize(phys_to_virt(kho_in.mem_map_phys));
1436 } else {
1437 kho_reserve_scratch();
1438 }
1439 }
1440
1441 void __init kho_populate(phys_addr_t fdt_phys, u64 fdt_len,
1442 phys_addr_t scratch_phys, u64 scratch_len)
1443 {
1444 struct kho_scratch *scratch = NULL;
1445 phys_addr_t mem_map_phys;
1446 void *fdt = NULL;
1447 int err = 0;
1448 unsigned int scratch_cnt = scratch_len / sizeof(*kho_scratch);
1449
1450 /* Validate the input FDT */
1451 fdt = early_memremap(fdt_phys, fdt_len);
1452 if (!fdt) {
1453 pr_warn("setup: failed to memremap FDT (0x%llx)\n", fdt_phys);
1454 err = -EFAULT;
1455 goto out;
1456 }
1457 err = fdt_check_header(fdt);
1458 if (err) {
1459 pr_warn("setup: handover FDT (0x%llx) is invalid: %d\n",
1460 fdt_phys, err);
1461 err = -EINVAL;
1462 goto out;
1463 }
1464 err = fdt_node_check_compatible(fdt, 0, KHO_FDT_COMPATIBLE);
1465 if (err) {
1466 pr_warn("setup: handover FDT (0x%llx) is incompatible with '%s': %d\n",
1467 fdt_phys, KHO_FDT_COMPATIBLE, err);
1468 err = -EINVAL;
1469 goto out;
1470 }
1471
1472 mem_map_phys = kho_get_mem_map_phys(fdt);
1473 if (!mem_map_phys) {
1474 err = -ENOENT;
1475 goto out;
1476 }
1477
1478 scratch = early_memremap(scratch_phys, scratch_len);
1479 if (!scratch) {
1480 pr_warn("setup: failed to memremap scratch (phys=0x%llx, len=%lld)\n",
1481 scratch_phys, scratch_len);
1482 err = -EFAULT;
1483 goto out;
1484 }
1485
1486 /*
1487 * We are passed safe contiguous blocks of memory from the previous
1488 * kernel to use for early boot purposes, so that we can resize the
1489 * memblock array as needed.
1490 */
1491 for (int i = 0; i < scratch_cnt; i++) {
1492 struct kho_scratch *area = &scratch[i];
1493 u64 size = area->size;
1494
1495 memblock_add(area->addr, size);
1496 err = memblock_mark_kho_scratch(area->addr, size);
1497 if (WARN_ON(err)) {
1498 pr_warn("failed to mark the scratch region 0x%pa+0x%pa: %pe",
1499 &area->addr, &size, ERR_PTR(err));
1500 goto out;
1501 }
1502 pr_debug("Marked 0x%pa+0x%pa as scratch", &area->addr, &size);
1503 }
1504
1505 memblock_reserve(scratch_phys, scratch_len);
1506
1507 /*
1508 * Now that we have a viable region of scratch memory, let's tell
1509 * the memblocks allocator to only use that for any allocations.
1510 * That way we ensure that nothing scribbles over in use data while
1511 * we initialize the page tables which we will need to ingest all
1512 * memory reservations from the previous kernel.
1513 */
1514 memblock_set_kho_scratch_only();
1515
1516 kho_in.fdt_phys = fdt_phys;
1517 kho_in.scratch_phys = scratch_phys;
1518 kho_in.mem_map_phys = mem_map_phys;
1519 kho_scratch_cnt = scratch_cnt;
1520 pr_info("found kexec handover data.\n");
1521
1522 out:
1523 if (fdt)
1524 early_memunmap(fdt, fdt_len);
1525 if (scratch)
1526 early_memunmap(scratch, scratch_len);
1527 if (err)
1528 pr_warn("disabling KHO revival: %d\n", err);
1529 }
1530
1531 /* Helper functions for kexec_file_load */
1532
1533 int kho_fill_kimage(struct kimage *image)
1534 {
1535 ssize_t scratch_size;
1536 int err = 0;
1537 struct kexec_buf scratch;
1538
1539 if (!kho_enable)
1540 return 0;
1541
1542 image->kho.fdt = virt_to_phys(kho_out.fdt);
1543
1544 scratch_size = sizeof(*kho_scratch) * kho_scratch_cnt;
1545 scratch = (struct kexec_buf){
1546 .image = image,
1547 .buffer = kho_scratch,
1548 .bufsz = scratch_size,
1549 .mem = KEXEC_BUF_MEM_UNKNOWN,
1550 .memsz = scratch_size,
1551 .buf_align = SZ_64K, /* Makes it easier to map */
1552 .buf_max = ULONG_MAX,
1553 .top_down = true,
1554 };
1555 err = kexec_add_buffer(&scratch);
1556 if (err)
1557 return err;
1558 image->kho.scratch = &image->segment[image->nr_segments - 1];
1559
1560 return 0;
1561 }
1562
1563 static int kho_walk_scratch(struct kexec_buf *kbuf,
1564 int (*func)(struct resource *, void *))
1565 {
1566 int ret = 0;
1567 int i;
1568
1569 for (i = 0; i < kho_scratch_cnt; i++) {
1570 struct resource res = {
1571 .start = kho_scratch[i].addr,
1572 .end = kho_scratch[i].addr + kho_scratch[i].size - 1,
1573 };
1574
1575 /* Try to fit the kimage into our KHO scratch region */
1576 ret = func(&res, kbuf);
1577 if (ret)
1578 break;
1579 }
1580
1581 return ret;
1582 }
1583
1584 int kho_locate_mem_hole(struct kexec_buf *kbuf,
1585 int (*func)(struct resource *, void *))
1586 {
1587 int ret;
1588
1589 if (!kho_enable || kbuf->image->type == KEXEC_TYPE_CRASH)
1590 return 1;
1591
1592 ret = kho_walk_scratch(kbuf, func);
1593
1594 return ret == 1 ? 0 : -EADDRNOTAVAIL;
1595 }
1596