// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec_handover.c - kexec handover metadata processing
 * Copyright (C) 2023 Alexander Graf <graf@amazon.com>
 * Copyright (C) 2025 Microsoft Corporation, Mike Rapoport <rppt@kernel.org>
 * Copyright (C) 2025 Google LLC, Changyuan Lyu <changyuanl@google.com>
 */

#define pr_fmt(fmt) "KHO: " fmt

#include <linux/cma.h>
#include <linux/count_zeros.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/kexec_handover.h>
#include <linux/libfdt.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/notifier.h>
#include <linux/page-isolation.h>

#include <asm/early_ioremap.h>

/*
 * KHO is tightly coupled with mm init and needs access to some of mm
 * internal APIs.
 */
#include "../mm/internal.h"
#include "kexec_internal.h"

#define KHO_FDT_COMPATIBLE "kho-v1"
#define PROP_PRESERVED_MEMORY_MAP "preserved-memory-map"
#define PROP_SUB_FDT "fdt"

static bool kho_enable __ro_after_init;

bool kho_is_enabled(void)
{
	return kho_enable;
}
EXPORT_SYMBOL_GPL(kho_is_enabled);

static int __init kho_parse_enable(char *p)
{
	return kstrtobool(p, &kho_enable);
}
early_param("kho", kho_parse_enable);

/*
 * Keep track of memory that is to be preserved across KHO.
 *
 * The serializing side uses two levels of xarrays to manage chunks of per-order
 * 512 byte bitmaps. For instance, if PAGE_SIZE = 4096, the entire 1G order of a
 * 1TB system would fit inside a single 512 byte bitmap. For order 0 allocations
 * each bitmap will cover 16M of address space. Thus, for 16G of memory at most
 * 512K of bitmap memory will be needed for order 0.
 *
 * This approach is fully incremental: as the serialization progresses, folios
 * can continue to be aggregated to the tracker. The final step, immediately
 * prior to kexec, serializes the xarray information into a linked list for the
 * successor kernel to parse.
 */

#define PRESERVE_BITS (512 * 8)

struct kho_mem_phys_bits {
	DECLARE_BITMAP(preserve, PRESERVE_BITS);
};

struct kho_mem_phys {
	/*
	 * Points to kho_mem_phys_bits, a sparse bitmap array. Each bit is sized
	 * to order.
	 */
	struct xarray phys_bits;
};

struct kho_mem_track {
	/* Points to kho_mem_phys, each order gets its own bitmap tree */
	struct xarray orders;
};
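/*
 * Worked example of the index math used by the tracker (illustrative
 * values; PRESERVE_BITS is always 512 * 8 == 4096, and PAGE_SIZE == 4096
 * is assumed here):
 *
 *	preserve pfn 0x142a57 at order 0:
 *		pfn_high   = 0x142a57 >> 0    = 0x142a57
 *		xa index   = pfn_high / 4096  = 0x142
 *		bit offset = pfn_high % 4096  = 0xa57
 *
 * i.e. bit 0xa57 of the 512-byte bitmap stored at index 0x142 in the
 * order-0 tree. At order 0 one such bitmap spans 4096 * 4KiB = 16MiB of
 * physical address space.
 */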
struct khoser_mem_chunk;

struct kho_serialization {
	struct page *fdt;
	struct list_head fdt_list;
	struct dentry *sub_fdt_dir;
	struct kho_mem_track track;
	/* First chunk of serialized preserved memory map */
	struct khoser_mem_chunk *preserved_mem_map;
};

static void *xa_load_or_alloc(struct xarray *xa, unsigned long index, size_t sz)
{
	void *elm, *res;

	elm = xa_load(xa, index);
	if (elm)
		return elm;

	elm = kzalloc(sz, GFP_KERNEL);
	if (!elm)
		return ERR_PTR(-ENOMEM);

	res = xa_cmpxchg(xa, index, NULL, elm, GFP_KERNEL);
	if (xa_is_err(res))
		res = ERR_PTR(xa_err(res));

	if (res) {
		/* Somebody else beat us to it or cmpxchg failed, drop ours */
		kfree(elm);
		return res;
	}

	return elm;
}

static void __kho_unpreserve(struct kho_mem_track *track, unsigned long pfn,
			     unsigned long end_pfn)
{
	struct kho_mem_phys_bits *bits;
	struct kho_mem_phys *physxa;

	while (pfn < end_pfn) {
		const unsigned int order =
			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
		const unsigned long pfn_high = pfn >> order;

		/*
		 * Always advance pfn, even when there is no bitmap for this
		 * order; a bare "continue" here would spin forever.
		 */
		physxa = xa_load(&track->orders, order);
		if (physxa) {
			bits = xa_load(&physxa->phys_bits,
				       pfn_high / PRESERVE_BITS);
			if (bits)
				clear_bit(pfn_high % PRESERVE_BITS,
					  bits->preserve);
		}

		pfn += 1 << order;
	}
}

static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
				unsigned int order)
{
	struct kho_mem_phys_bits *bits;
	struct kho_mem_phys *physxa, *new_physxa;
	const unsigned long pfn_high = pfn >> order;

	might_sleep();

	physxa = xa_load(&track->orders, order);
	if (!physxa) {
		int err;

		new_physxa = kzalloc(sizeof(*physxa), GFP_KERNEL);
		if (!new_physxa)
			return -ENOMEM;

		xa_init(&new_physxa->phys_bits);
		physxa = xa_cmpxchg(&track->orders, order, NULL, new_physxa,
				    GFP_KERNEL);

		err = xa_err(physxa);
		if (err || physxa) {
			xa_destroy(&new_physxa->phys_bits);
			kfree(new_physxa);

			if (err)
				return err;
		} else {
			physxa = new_physxa;
		}
	}

	bits = xa_load_or_alloc(&physxa->phys_bits, pfn_high / PRESERVE_BITS,
				sizeof(*bits));
	if (IS_ERR(bits))
		return PTR_ERR(bits);

	set_bit(pfn_high % PRESERVE_BITS, bits->preserve);

	return 0;
}

/* Like free_reserved_page(), except the page is not actually freed */
static void kho_restore_page(struct page *page, unsigned int order)
{
	unsigned int nr_pages = (1 << order);

	/* Head page gets refcount of 1. */
	set_page_count(page, 1);

	/* For higher order folios, tail pages get a page count of zero. */
	for (unsigned int i = 1; i < nr_pages; i++)
		set_page_count(page + i, 0);

	if (order > 0)
		prep_compound_page(page, order);

	adjust_managed_page_count(page, nr_pages);
}

/**
 * kho_restore_folio - recreates the folio from the preserved memory.
 * @phys: physical address of the folio.
 *
 * Return: pointer to the struct folio on success, NULL on failure.
 */
struct folio *kho_restore_folio(phys_addr_t phys)
{
	struct page *page = pfn_to_online_page(PHYS_PFN(phys));
	unsigned long order;

	if (!page)
		return NULL;

	order = page->private;
	if (order > MAX_PAGE_ORDER)
		return NULL;

	kho_restore_page(page, order);
	return page_folio(page);
}
EXPORT_SYMBOL_GPL(kho_restore_folio);
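/*
 * Restore-side sketch (hypothetical caller; "folio_phys" stands for a
 * physical address the previous kernel recorded in one of its sub FDTs
 * right after preserving the folio):
 *
 *	struct folio *folio;
 *
 *	folio = kho_restore_folio(folio_phys);
 *	if (!folio)
 *		return -ENOENT;
 *
 * The folio order comes from page->private, where deserialize_bitmap()
 * stashed it during early boot of the successor kernel.
 */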
/*
 * Serialize and deserialize struct kho_mem_phys across kexec.
 *
 * Record all the bitmaps in a linked list of pages for the next kernel to
 * process. Each chunk holds bitmaps of the same order and each block of
 * bitmaps starts at a given physical address. This allows the bitmaps to be
 * sparse. The xarray is used to store them in a tree while building up the
 * data structure, but the KHO successor kernel only needs to process them
 * once in order.
 *
 * All of this memory is normal kmalloc() memory and is not marked for
 * preservation. The successor kernel will remain isolated to the scratch
 * space until it completes processing this list. Once processed all the
 * memory storing these ranges will be marked as free.
 */

struct khoser_mem_bitmap_ptr {
	phys_addr_t phys_start;
	DECLARE_KHOSER_PTR(bitmap, struct kho_mem_phys_bits *);
};

struct khoser_mem_chunk_hdr {
	DECLARE_KHOSER_PTR(next, struct khoser_mem_chunk *);
	unsigned int order;
	unsigned int num_elms;
};

#define KHOSER_BITMAP_SIZE \
	((PAGE_SIZE - sizeof(struct khoser_mem_chunk_hdr)) / \
	 sizeof(struct khoser_mem_bitmap_ptr))

struct khoser_mem_chunk {
	struct khoser_mem_chunk_hdr hdr;
	struct khoser_mem_bitmap_ptr bitmaps[KHOSER_BITMAP_SIZE];
};

static_assert(sizeof(struct khoser_mem_chunk) == PAGE_SIZE);

static struct khoser_mem_chunk *new_chunk(struct khoser_mem_chunk *cur_chunk,
					  unsigned long order)
{
	struct khoser_mem_chunk *chunk;

	chunk = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!chunk)
		return NULL;
	chunk->hdr.order = order;
	if (cur_chunk)
		KHOSER_STORE_PTR(cur_chunk->hdr.next, chunk);
	return chunk;
}

static void kho_mem_ser_free(struct khoser_mem_chunk *first_chunk)
{
	struct khoser_mem_chunk *chunk = first_chunk;

	while (chunk) {
		struct khoser_mem_chunk *tmp = chunk;

		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
		kfree(tmp);
	}
}

static int kho_mem_serialize(struct kho_serialization *ser)
{
	struct khoser_mem_chunk *first_chunk = NULL;
	struct khoser_mem_chunk *chunk = NULL;
	struct kho_mem_phys *physxa;
	unsigned long order;

	xa_for_each(&ser->track.orders, order, physxa) {
		struct kho_mem_phys_bits *bits;
		unsigned long phys;

		chunk = new_chunk(chunk, order);
		if (!chunk)
			goto err_free;

		if (!first_chunk)
			first_chunk = chunk;

		xa_for_each(&physxa->phys_bits, phys, bits) {
			struct khoser_mem_bitmap_ptr *elm;

			if (chunk->hdr.num_elms == ARRAY_SIZE(chunk->bitmaps)) {
				chunk = new_chunk(chunk, order);
				if (!chunk)
					goto err_free;
			}

			elm = &chunk->bitmaps[chunk->hdr.num_elms];
			chunk->hdr.num_elms++;
			elm->phys_start = (phys * PRESERVE_BITS)
					  << (order + PAGE_SHIFT);
			KHOSER_STORE_PTR(elm->bitmap, bits);
		}
	}

	ser->preserved_mem_map = first_chunk;

	return 0;

err_free:
	kho_mem_ser_free(first_chunk);
	return -ENOMEM;
}
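/*
 * Worked example of the phys_start math above (illustrative, assuming
 * PAGE_SHIFT == 12): an order-9 bitmap stored at phys_bits index 2
 * covers pfn_high values [2 * 4096, 3 * 4096), i.e. 2MiB blocks
 * starting at
 *
 *	(2 * PRESERVE_BITS) << (9 + PAGE_SHIFT) = 8192 * 2MiB = 16GiB
 *
 * so bit N of that bitmap marks the 2MiB block at 16GiB + N * 2MiB.
 */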
static void __init deserialize_bitmap(unsigned int order,
				      struct khoser_mem_bitmap_ptr *elm)
{
	struct kho_mem_phys_bits *bitmap = KHOSER_LOAD_PTR(elm->bitmap);
	unsigned long bit;

	for_each_set_bit(bit, bitmap->preserve, PRESERVE_BITS) {
		int sz = 1 << (order + PAGE_SHIFT);
		phys_addr_t phys =
			elm->phys_start + (bit << (order + PAGE_SHIFT));
		struct page *page = phys_to_page(phys);

		memblock_reserve(phys, sz);
		memblock_reserved_mark_noinit(phys, sz);
		/* Stash the order so kho_restore_folio() can recover it */
		page->private = order;
	}
}

static void __init kho_mem_deserialize(const void *fdt)
{
	struct khoser_mem_chunk *chunk;
	const phys_addr_t *mem;
	int len;

	mem = fdt_getprop(fdt, 0, PROP_PRESERVED_MEMORY_MAP, &len);
	if (!mem || len != sizeof(*mem)) {
		pr_err("failed to get preserved memory bitmaps\n");
		return;
	}

	chunk = *mem ? phys_to_virt(*mem) : NULL;
	while (chunk) {
		unsigned int i;

		for (i = 0; i != chunk->hdr.num_elms; i++)
			deserialize_bitmap(chunk->hdr.order,
					   &chunk->bitmaps[i]);
		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
	}
}

/*
 * With KHO enabled, memory can become fragmented because KHO regions may
 * be anywhere in physical address space. The scratch regions give us safe
 * zones that will never contain KHO allocations. This is where we can
 * later safely load new kexec images and it is what the next kernel uses
 * for early allocations before its page allocator is initialized.
 */
static struct kho_scratch *kho_scratch;
static unsigned int kho_scratch_cnt;

/*
 * The scratch areas are scaled by default as a percentage of the memory
 * allocated from memblock. A user can override the scale with the command
 * line parameter:
 *
 *	kho_scratch=N%
 *
 * It is also possible to explicitly define sizes for the lowmem, global and
 * per-node scratch areas:
 *
 *	kho_scratch=l[KMG],g[KMG],n[KMG]
 *
 * The explicit size definition takes precedence over the scale definition.
 */
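/*
 * Examples (illustrative values):
 *
 *	kho_scratch=150%	scale all scratch areas to 150% of the
 *				memory memblock reserved for the kernel
 *	kho_scratch=256M,2G,512M
 *				256MiB of lowmem scratch, 2GiB of global
 *				scratch and 512MiB for each NUMA node
 */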
static unsigned int scratch_scale __initdata = 200;
static phys_addr_t scratch_size_global __initdata;
static phys_addr_t scratch_size_pernode __initdata;
static phys_addr_t scratch_size_lowmem __initdata;

static int __init kho_parse_scratch_size(char *p)
{
	size_t len;
	unsigned long sizes[3];
	size_t total_size = 0;
	int i;

	if (!p)
		return -EINVAL;

	len = strlen(p);
	if (!len)
		return -EINVAL;

	/* parse nn% */
	if (p[len - 1] == '%') {
		/* unsigned int max is 4,294,967,295, 10 chars */
		char s_scale[11] = {};
		int ret = 0;

		if (len > ARRAY_SIZE(s_scale))
			return -EINVAL;

		memcpy(s_scale, p, len - 1);
		ret = kstrtouint(s_scale, 10, &scratch_scale);
		if (!ret)
			pr_notice("scratch scale is %d%%\n", scratch_scale);
		return ret;
	}

	/* parse l[KMG],g[KMG],n[KMG] */
	for (i = 0; i < ARRAY_SIZE(sizes); i++) {
		char *endp = p;

		if (i > 0) {
			if (*p != ',')
				return -EINVAL;
			p += 1;
		}

		sizes[i] = memparse(p, &endp);
		if (endp == p)
			return -EINVAL;
		p = endp;
		total_size += sizes[i];
	}

	if (!total_size)
		return -EINVAL;

	scratch_size_lowmem = sizes[0];
	scratch_size_global = sizes[1];
	scratch_size_pernode = sizes[2];
	scratch_scale = 0;

	pr_notice("scratch areas: lowmem: %lluMiB global: %lluMiB pernode: %lluMiB\n",
		  (u64)(scratch_size_lowmem >> 20),
		  (u64)(scratch_size_global >> 20),
		  (u64)(scratch_size_pernode >> 20));

	return 0;
}
early_param("kho_scratch", kho_parse_scratch_size);

static void __init scratch_size_update(void)
{
	phys_addr_t size;

	if (!scratch_scale)
		return;

	size = memblock_reserved_kern_size(ARCH_LOW_ADDRESS_LIMIT,
					   NUMA_NO_NODE);
	size = size * scratch_scale / 100;
	scratch_size_lowmem = round_up(size, CMA_MIN_ALIGNMENT_BYTES);

	size = memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE,
					   NUMA_NO_NODE);
	size = size * scratch_scale / 100 - scratch_size_lowmem;
	scratch_size_global = round_up(size, CMA_MIN_ALIGNMENT_BYTES);
}

static phys_addr_t __init scratch_size_node(int nid)
{
	phys_addr_t size;

	if (scratch_scale) {
		size = memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE,
						   nid);
		size = size * scratch_scale / 100;
	} else {
		size = scratch_size_pernode;
	}

	return round_up(size, CMA_MIN_ALIGNMENT_BYTES);
}
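/*
 * Worked example for the default 200% scale (illustrative numbers): if
 * memblock reserved 512MiB of kernel memory below ARCH_LOW_ADDRESS_LIMIT
 * and 2GiB in total, then
 *
 *	scratch_size_lowmem = 512MiB * 200 / 100        = 1GiB
 *	scratch_size_global = 2GiB * 200 / 100 - 1GiB   = 3GiB
 *
 * both rounded up to CMA_MIN_ALIGNMENT_BYTES, plus 200% of each node's
 * reserved kernel memory as that node's scratch area.
 */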
/**
 * kho_reserve_scratch - Reserve a contiguous chunk of memory for kexec
 *
 * With KHO we can preserve arbitrary pages in the system. To ensure we still
 * have a large contiguous region of memory when we search the physical address
 * space for target memory, let's make sure we always have a large CMA region
 * active. This CMA region will only be used for movable pages which are not a
 * problem for us during KHO because we can just move them somewhere else.
 */
static void __init kho_reserve_scratch(void)
{
	phys_addr_t addr, size;
	int nid, i = 0;

	if (!kho_enable)
		return;

	scratch_size_update();

	/* FIXME: deal with node hot-plug/remove */
	kho_scratch_cnt = num_online_nodes() + 2;
	size = kho_scratch_cnt * sizeof(*kho_scratch);
	kho_scratch = memblock_alloc(size, PAGE_SIZE);
	if (!kho_scratch)
		goto err_disable_kho;

	/*
	 * reserve scratch area in low memory for lowmem allocations in the
	 * next kernel
	 */
	size = scratch_size_lowmem;
	addr = memblock_phys_alloc_range(size, CMA_MIN_ALIGNMENT_BYTES, 0,
					 ARCH_LOW_ADDRESS_LIMIT);
	if (!addr)
		goto err_free_scratch_desc;

	kho_scratch[i].addr = addr;
	kho_scratch[i].size = size;
	i++;

	/* reserve large contiguous area for allocations without nid */
	size = scratch_size_global;
	addr = memblock_phys_alloc(size, CMA_MIN_ALIGNMENT_BYTES);
	if (!addr)
		goto err_free_scratch_areas;

	kho_scratch[i].addr = addr;
	kho_scratch[i].size = size;
	i++;

	for_each_online_node(nid) {
		size = scratch_size_node(nid);
		addr = memblock_alloc_range_nid(size, CMA_MIN_ALIGNMENT_BYTES,
						0, MEMBLOCK_ALLOC_ACCESSIBLE,
						nid, true);
		if (!addr)
			goto err_free_scratch_areas;

		kho_scratch[i].addr = addr;
		kho_scratch[i].size = size;
		i++;
	}

	return;

err_free_scratch_areas:
	for (i--; i >= 0; i--)
		memblock_phys_free(kho_scratch[i].addr, kho_scratch[i].size);
err_free_scratch_desc:
	memblock_free(kho_scratch, kho_scratch_cnt * sizeof(*kho_scratch));
err_disable_kho:
	pr_warn("Failed to reserve scratch area, disabling kexec handover\n");
	kho_enable = false;
}

struct fdt_debugfs {
	struct list_head list;
	struct debugfs_blob_wrapper wrapper;
	struct dentry *file;
};

static int kho_debugfs_fdt_add(struct list_head *list, struct dentry *dir,
			       const char *name, const void *fdt)
{
	struct fdt_debugfs *f;
	struct dentry *file;

	f = kmalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return -ENOMEM;

	f->wrapper.data = (void *)fdt;
	f->wrapper.size = fdt_totalsize(fdt);

	file = debugfs_create_blob(name, 0400, dir, &f->wrapper);
	if (IS_ERR(file)) {
		kfree(f);
		return PTR_ERR(file);
	}

	f->file = file;
	list_add(&f->list, list);

	return 0;
}

/**
 * kho_add_subtree - record the physical address of a sub FDT in the KHO root tree.
 * @ser: serialization control object passed by KHO notifiers.
 * @name: name of the sub tree.
 * @fdt: the sub tree blob.
 *
 * Creates a new child node named @name in the KHO root FDT and records
 * the physical address of @fdt. The pages of @fdt must also be preserved
 * by KHO for the new kernel to retrieve it after kexec.
 *
 * A debugfs blob entry is also created at
 * ``/sys/kernel/debug/kho/out/sub_fdts/@name``.
 *
 * Return: 0 on success, error code on failure
 */
int kho_add_subtree(struct kho_serialization *ser, const char *name, void *fdt)
{
	int err = 0;
	u64 phys = (u64)virt_to_phys(fdt);
	void *root = page_to_virt(ser->fdt);

	err |= fdt_begin_node(root, name);
	err |= fdt_property(root, PROP_SUB_FDT, &phys, sizeof(phys));
	err |= fdt_end_node(root);

	if (err)
		return err;

	return kho_debugfs_fdt_add(&ser->fdt_list, ser->sub_fdt_dir, name, fdt);
}
EXPORT_SYMBOL_GPL(kho_add_subtree);
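/*
 * Producer-side sketch (hypothetical subsystem, all names made up). A
 * KHO user typically reacts to KEXEC_KHO_FINALIZE by preserving its
 * memory and publishing a sub FDT that tells the next kernel where to
 * find it:
 *
 *	static int example_kho_notifier(struct notifier_block *self,
 *					unsigned long cmd, void *v)
 *	{
 *		struct kho_serialization *ser = v;
 *		int err;
 *
 *		if (cmd != KEXEC_KHO_FINALIZE)
 *			return NOTIFY_DONE;
 *
 *		err = kho_preserve_folio(example_folio);
 *		if (!err)
 *			err = kho_add_subtree(ser, "example", example_fdt);
 *
 *		return err ? notifier_from_errno(err) : NOTIFY_OK;
 *	}
 *
 * where "example_fdt" is a preserved blob whose properties record the
 * physical address of "example_folio". The notifier is registered with
 * register_kho_notifier() (see below).
 */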
struct kho_out {
	struct blocking_notifier_head chain_head;

	struct dentry *dir;

	struct mutex lock; /* protects KHO FDT finalization */

	struct kho_serialization ser;
	bool finalized;
};

static struct kho_out kho_out = {
	.chain_head = BLOCKING_NOTIFIER_INIT(kho_out.chain_head),
	.lock = __MUTEX_INITIALIZER(kho_out.lock),
	.ser = {
		.fdt_list = LIST_HEAD_INIT(kho_out.ser.fdt_list),
		.track = {
			.orders = XARRAY_INIT(kho_out.ser.track.orders, 0),
		},
	},
	.finalized = false,
};

int register_kho_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&kho_out.chain_head, nb);
}
EXPORT_SYMBOL_GPL(register_kho_notifier);

int unregister_kho_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&kho_out.chain_head, nb);
}
EXPORT_SYMBOL_GPL(unregister_kho_notifier);

/**
 * kho_preserve_folio - preserve a folio across kexec.
 * @folio: folio to preserve.
 *
 * Instructs KHO to preserve the whole folio across kexec. The order
 * will be preserved as well.
 *
 * Return: 0 on success, error code on failure
 */
int kho_preserve_folio(struct folio *folio)
{
	const unsigned long pfn = folio_pfn(folio);
	const unsigned int order = folio_order(folio);
	struct kho_mem_track *track = &kho_out.ser.track;

	if (kho_out.finalized)
		return -EBUSY;

	return __kho_preserve_order(track, pfn, order);
}
EXPORT_SYMBOL_GPL(kho_preserve_folio);

/**
 * kho_preserve_phys - preserve a physically contiguous range across kexec.
 * @phys: physical address of the range.
 * @size: size of the range.
 *
 * Instructs KHO to preserve the memory range from @phys to @phys + @size
 * across kexec.
 *
 * Return: 0 on success, error code on failure
 */
int kho_preserve_phys(phys_addr_t phys, size_t size)
{
	unsigned long pfn = PHYS_PFN(phys);
	unsigned long failed_pfn = 0;
	const unsigned long start_pfn = pfn;
	const unsigned long end_pfn = PHYS_PFN(phys + size);
	int err = 0;
	struct kho_mem_track *track = &kho_out.ser.track;

	if (kho_out.finalized)
		return -EBUSY;

	if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
		return -EINVAL;

	while (pfn < end_pfn) {
		const unsigned int order =
			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));

		err = __kho_preserve_order(track, pfn, order);
		if (err) {
			failed_pfn = pfn;
			break;
		}

		pfn += 1 << order;
	}

	if (err)
		__kho_unpreserve(track, start_pfn, failed_pfn);

	return err;
}
EXPORT_SYMBOL_GPL(kho_preserve_phys);
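/*
 * Worked example of the range decomposition above (illustrative,
 * assuming PAGE_SIZE == 4096): phys == 0x3000 and size == 0xd000 give
 * pfns [3, 16), split into maximal naturally-aligned blocks:
 *
 *	pfn 3, order 0	(pfn alignment allows no more)
 *	pfn 4, order 2	(order 3 would reach past end_pfn)
 *	pfn 8, order 3
 *
 * each step taking min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn))
 * as the order.
 */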
/* Handling for debug/kho/out */

static struct dentry *debugfs_root;

static int kho_out_update_debugfs_fdt(void)
{
	int err = 0;
	struct fdt_debugfs *ff, *tmp;

	if (kho_out.finalized) {
		err = kho_debugfs_fdt_add(&kho_out.ser.fdt_list, kho_out.dir,
					  "fdt", page_to_virt(kho_out.ser.fdt));
	} else {
		list_for_each_entry_safe(ff, tmp, &kho_out.ser.fdt_list, list) {
			debugfs_remove(ff->file);
			list_del(&ff->list);
			kfree(ff);
		}
	}

	return err;
}

static int kho_abort(void)
{
	int err;
	unsigned long order;
	struct kho_mem_phys *physxa;

	xa_for_each(&kho_out.ser.track.orders, order, physxa) {
		struct kho_mem_phys_bits *bits;
		unsigned long phys;

		xa_for_each(&physxa->phys_bits, phys, bits)
			kfree(bits);

		xa_destroy(&physxa->phys_bits);
		kfree(physxa);
	}
	xa_destroy(&kho_out.ser.track.orders);

	if (kho_out.ser.preserved_mem_map) {
		kho_mem_ser_free(kho_out.ser.preserved_mem_map);
		kho_out.ser.preserved_mem_map = NULL;
	}

	err = blocking_notifier_call_chain(&kho_out.chain_head, KEXEC_KHO_ABORT,
					   NULL);
	err = notifier_to_errno(err);

	if (err)
		pr_err("Failed to abort KHO finalization: %d\n", err);

	return err;
}
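/*
 * Shape of the root KHO FDT built by kho_finalize() below (sketch; the
 * "example" node stands for any subtree added via kho_add_subtree()):
 *
 *	/ {
 *		compatible = "kho-v1";
 *		preserved-memory-map = <u64: phys of first khoser_mem_chunk>;
 *		example {
 *			fdt = <u64: phys of the sub FDT blob>;
 *		};
 *	};
 */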
static int kho_finalize(void)
{
	int err = 0;
	u64 *preserved_mem_map;
	void *fdt = page_to_virt(kho_out.ser.fdt);

	err |= fdt_create(fdt, PAGE_SIZE);
	err |= fdt_finish_reservemap(fdt);
	err |= fdt_begin_node(fdt, "");
	err |= fdt_property_string(fdt, "compatible", KHO_FDT_COMPATIBLE);
	/*
	 * Reserve the preserved-memory-map property in the root FDT, so
	 * that all property definitions will precede subnodes created by
	 * KHO callers.
	 */
	err |= fdt_property_placeholder(fdt, PROP_PRESERVED_MEMORY_MAP,
					sizeof(*preserved_mem_map),
					(void **)&preserved_mem_map);
	if (err)
		goto abort;

	err = kho_preserve_folio(page_folio(kho_out.ser.fdt));
	if (err)
		goto abort;

	err = blocking_notifier_call_chain(&kho_out.chain_head,
					   KEXEC_KHO_FINALIZE, &kho_out.ser);
	err = notifier_to_errno(err);
	if (err)
		goto abort;

	err = kho_mem_serialize(&kho_out.ser);
	if (err)
		goto abort;

	*preserved_mem_map = (u64)virt_to_phys(kho_out.ser.preserved_mem_map);

	err |= fdt_end_node(fdt);
	err |= fdt_finish(fdt);

abort:
	if (err) {
		pr_err("Failed to convert KHO state tree: %d\n", err);
		kho_abort();
	}

	return err;
}

static int kho_out_finalize_get(void *data, u64 *val)
{
	mutex_lock(&kho_out.lock);
	*val = kho_out.finalized;
	mutex_unlock(&kho_out.lock);

	return 0;
}

static int kho_out_finalize_set(void *data, u64 _val)
{
	int ret = 0;
	bool val = !!_val;

	mutex_lock(&kho_out.lock);

	if (val == kho_out.finalized) {
		if (kho_out.finalized)
			ret = -EEXIST;
		else
			ret = -ENOENT;
		goto unlock;
	}

	if (val)
		ret = kho_finalize();
	else
		ret = kho_abort();

	if (ret)
		goto unlock;

	kho_out.finalized = val;
	ret = kho_out_update_debugfs_fdt();

unlock:
	mutex_unlock(&kho_out.lock);
	return ret;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_kho_out_finalize, kho_out_finalize_get,
			 kho_out_finalize_set, "%llu\n");

static int scratch_phys_show(struct seq_file *m, void *v)
{
	for (int i = 0; i < kho_scratch_cnt; i++)
		seq_printf(m, "0x%llx\n", kho_scratch[i].addr);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(scratch_phys);

static int scratch_len_show(struct seq_file *m, void *v)
{
	for (int i = 0; i < kho_scratch_cnt; i++)
		seq_printf(m, "0x%llx\n", kho_scratch[i].size);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(scratch_len);

static __init int kho_out_debugfs_init(void)
{
	struct dentry *dir, *f, *sub_fdt_dir;

	dir = debugfs_create_dir("out", debugfs_root);
	if (IS_ERR(dir))
		return -ENOMEM;

	sub_fdt_dir = debugfs_create_dir("sub_fdts", dir);
	if (IS_ERR(sub_fdt_dir))
		goto err_rmdir;

	f = debugfs_create_file("scratch_phys", 0400, dir, NULL,
				&scratch_phys_fops);
	if (IS_ERR(f))
		goto err_rmdir;

	f = debugfs_create_file("scratch_len", 0400, dir, NULL,
				&scratch_len_fops);
	if (IS_ERR(f))
		goto err_rmdir;

	f = debugfs_create_file("finalize", 0600, dir, NULL,
				&fops_kho_out_finalize);
	if (IS_ERR(f))
		goto err_rmdir;

	kho_out.dir = dir;
	kho_out.ser.sub_fdt_dir = sub_fdt_dir;
	return 0;

err_rmdir:
	debugfs_remove_recursive(dir);
	return -ENOENT;
}

struct kho_in {
	struct dentry *dir;
	phys_addr_t fdt_phys;
	phys_addr_t scratch_phys;
	struct list_head fdt_list;
};

static struct kho_in kho_in = {
	.fdt_list = LIST_HEAD_INIT(kho_in.fdt_list),
};

static const void *kho_get_fdt(void)
{
	return kho_in.fdt_phys ? phys_to_virt(kho_in.fdt_phys) : NULL;
}

/**
 * kho_retrieve_subtree - retrieve a preserved sub FDT by its name.
 * @name: the name of the sub FDT passed to kho_add_subtree().
 * @phys: if found, the physical address of the sub FDT is stored in @phys.
 *
 * Retrieve a preserved sub FDT named @name and store its physical
 * address in @phys.
 *
 * Return: 0 on success, error code on failure
 */
int kho_retrieve_subtree(const char *name, phys_addr_t *phys)
{
	const void *fdt = kho_get_fdt();
	const u64 *val;
	int offset, len;

	if (!fdt)
		return -ENOENT;

	if (!phys)
		return -EINVAL;

	offset = fdt_subnode_offset(fdt, 0, name);
	if (offset < 0)
		return -ENOENT;

	val = fdt_getprop(fdt, offset, PROP_SUB_FDT, &len);
	if (!val || len != sizeof(*val))
		return -EINVAL;

	*phys = (phys_addr_t)*val;

	return 0;
}
EXPORT_SYMBOL_GPL(kho_retrieve_subtree);
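/*
 * Consumer-side sketch (hypothetical; the node name "example" and its
 * "mem" property mirror the producer sketch above):
 *
 *	phys_addr_t fdt_phys;
 *	const u64 *mem;
 *	int len;
 *
 *	if (kho_retrieve_subtree("example", &fdt_phys))
 *		return -ENOENT;
 *	mem = fdt_getprop(phys_to_virt(fdt_phys), 0, "mem", &len);
 *
 * after which *mem can be handed to kho_restore_folio().
 */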
/* Handling for debugfs/kho/in */

static __init int kho_in_debugfs_init(const void *fdt)
{
	struct dentry *sub_fdt_dir;
	int err, child;

	kho_in.dir = debugfs_create_dir("in", debugfs_root);
	if (IS_ERR(kho_in.dir))
		return PTR_ERR(kho_in.dir);

	sub_fdt_dir = debugfs_create_dir("sub_fdts", kho_in.dir);
	if (IS_ERR(sub_fdt_dir)) {
		err = PTR_ERR(sub_fdt_dir);
		goto err_rmdir;
	}

	err = kho_debugfs_fdt_add(&kho_in.fdt_list, kho_in.dir, "fdt", fdt);
	if (err)
		goto err_rmdir;

	fdt_for_each_subnode(child, fdt, 0) {
		int len = 0;
		const char *name = fdt_get_name(fdt, child, NULL);
		const u64 *fdt_phys;

		fdt_phys = fdt_getprop(fdt, child, PROP_SUB_FDT, &len);
		if (!fdt_phys)
			continue;
		if (len != sizeof(*fdt_phys)) {
			pr_warn("node `%s`'s prop `fdt` has invalid length: %d\n",
				name, len);
			continue;
		}
		err = kho_debugfs_fdt_add(&kho_in.fdt_list, sub_fdt_dir, name,
					  phys_to_virt(*fdt_phys));
		if (err) {
			pr_warn("failed to add fdt `%s` to debugfs: %d\n", name,
				err);
			continue;
		}
	}

	return 0;

err_rmdir:
	debugfs_remove_recursive(kho_in.dir);
	return err;
}
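/*
 * Resulting debugfs layout (sketch):
 *
 *	/sys/kernel/debug/kho/
 *		in/		only present after a KHO-enabled kexec
 *			fdt
 *			sub_fdts/<name>
 *		out/
 *			finalize
 *			fdt	only present while finalized
 *			scratch_len
 *			scratch_phys
 *			sub_fdts/<name>
 *
 * Writing 1 to "finalize" triggers kho_finalize(), writing 0 triggers
 * kho_abort().
 */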
static __init int kho_init(void)
{
	int err = 0;
	const void *fdt = kho_get_fdt();

	if (!kho_enable)
		return 0;

	kho_out.ser.fdt = alloc_page(GFP_KERNEL);
	if (!kho_out.ser.fdt) {
		err = -ENOMEM;
		goto err_free_scratch;
	}

	debugfs_root = debugfs_create_dir("kho", NULL);
	if (IS_ERR(debugfs_root)) {
		err = -ENOENT;
		goto err_free_fdt;
	}

	err = kho_out_debugfs_init();
	if (err)
		goto err_free_fdt;

	if (fdt) {
		err = kho_in_debugfs_init(fdt);
		/*
		 * Failure to create /sys/kernel/debug/kho/in does not prevent
		 * reviving state from KHO and setting up KHO for the next
		 * kexec.
		 */
		if (err)
			pr_err("failed exposing handover FDT in debugfs: %d\n",
			       err);

		return 0;
	}

	for (int i = 0; i < kho_scratch_cnt; i++) {
		unsigned long base_pfn = PHYS_PFN(kho_scratch[i].addr);
		unsigned long count = kho_scratch[i].size >> PAGE_SHIFT;
		unsigned long pfn;

		for (pfn = base_pfn; pfn < base_pfn + count;
		     pfn += pageblock_nr_pages)
			init_cma_reserved_pageblock(pfn_to_page(pfn));
	}

	return 0;

err_free_fdt:
	put_page(kho_out.ser.fdt);
	kho_out.ser.fdt = NULL;
err_free_scratch:
	for (int i = 0; i < kho_scratch_cnt; i++) {
		void *start = __va(kho_scratch[i].addr);
		void *end = start + kho_scratch[i].size;

		free_reserved_area(start, end, -1, "");
	}
	kho_enable = false;
	return err;
}
late_initcall(kho_init);

static void __init kho_release_scratch(void)
{
	phys_addr_t start, end;
	u64 i;

	memmap_init_kho_scratch_pages();

	/*
	 * Mark scratch mem as CMA before we return it. That way we
	 * ensure that no kernel allocations happen on it. That means
	 * we can reuse it as scratch memory again later.
	 */
	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
			     MEMBLOCK_KHO_SCRATCH, &start, &end, NULL) {
		ulong start_pfn = pageblock_start_pfn(PFN_DOWN(start));
		ulong end_pfn = pageblock_align(PFN_UP(end));
		ulong pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages)
			init_pageblock_migratetype(pfn_to_page(pfn),
						   MIGRATE_CMA, false);
	}
}

void __init kho_memory_init(void)
{
	struct folio *folio;

	if (kho_in.scratch_phys) {
		kho_scratch = phys_to_virt(kho_in.scratch_phys);
		kho_release_scratch();

		kho_mem_deserialize(kho_get_fdt());
		folio = kho_restore_folio(kho_in.fdt_phys);
		if (!folio)
			pr_warn("failed to restore folio for KHO fdt\n");
	} else {
		kho_reserve_scratch();
	}
}
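/*
 * Boot-time call order, for orientation (the exact call sites live in
 * arch and mm init code): architecture setup discovers the handover
 * parameters and calls kho_populate() very early, kho_memory_init() is
 * called from mm initialization once memblock is populated, and
 * kho_init() above finishes the setup as a late initcall.
 */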
void __init kho_populate(phys_addr_t fdt_phys, u64 fdt_len,
			 phys_addr_t scratch_phys, u64 scratch_len)
{
	void *fdt = NULL;
	struct kho_scratch *scratch = NULL;
	int err = 0;
	unsigned int scratch_cnt = scratch_len / sizeof(*kho_scratch);

	/* Validate the input FDT */
	fdt = early_memremap(fdt_phys, fdt_len);
	if (!fdt) {
		pr_warn("setup: failed to memremap FDT (0x%llx)\n", fdt_phys);
		err = -EFAULT;
		goto out;
	}
	err = fdt_check_header(fdt);
	if (err) {
		pr_warn("setup: handover FDT (0x%llx) is invalid: %d\n",
			fdt_phys, err);
		err = -EINVAL;
		goto out;
	}
	err = fdt_node_check_compatible(fdt, 0, KHO_FDT_COMPATIBLE);
	if (err) {
		pr_warn("setup: handover FDT (0x%llx) is incompatible with '%s': %d\n",
			fdt_phys, KHO_FDT_COMPATIBLE, err);
		err = -EINVAL;
		goto out;
	}

	scratch = early_memremap(scratch_phys, scratch_len);
	if (!scratch) {
		pr_warn("setup: failed to memremap scratch (phys=0x%llx, len=%llu)\n",
			scratch_phys, scratch_len);
		err = -EFAULT;
		goto out;
	}

	/*
	 * We pass safe contiguous blocks of memory to use for early boot
	 * purposes from the previous kernel so that we can resize the
	 * memblock array as needed.
	 */
	for (int i = 0; i < scratch_cnt; i++) {
		struct kho_scratch *area = &scratch[i];
		phys_addr_t size = area->size;

		memblock_add(area->addr, size);
		err = memblock_mark_kho_scratch(area->addr, size);
		if (WARN_ON(err)) {
			pr_warn("failed to mark the scratch region 0x%pa+0x%pa: %d\n",
				&area->addr, &size, err);
			goto out;
		}
		pr_debug("Marked 0x%pa+0x%pa as scratch\n", &area->addr, &size);
	}

	memblock_reserve(scratch_phys, scratch_len);

	/*
	 * Now that we have a viable region of scratch memory, let's tell
	 * the memblock allocator to only use that for any allocations.
	 * That way we ensure that nothing scribbles over in-use data while
	 * we initialize the page tables, which we will need to ingest all
	 * memory reservations from the previous kernel.
	 */
	memblock_set_kho_scratch_only();

	kho_in.fdt_phys = fdt_phys;
	kho_in.scratch_phys = scratch_phys;
	kho_scratch_cnt = scratch_cnt;
	pr_info("found kexec handover data. Will skip init for some devices\n");

out:
	if (fdt)
		early_memunmap(fdt, fdt_len);
	if (scratch)
		early_memunmap(scratch, scratch_len);
	if (err)
		pr_warn("disabling KHO revival: %d\n", err);
}

/* Helper functions for kexec_file_load */

int kho_fill_kimage(struct kimage *image)
{
	ssize_t scratch_size;
	int err = 0;
	struct kexec_buf scratch;

	if (!kho_enable)
		return 0;

	image->kho.fdt = page_to_phys(kho_out.ser.fdt);

	scratch_size = sizeof(*kho_scratch) * kho_scratch_cnt;
	scratch = (struct kexec_buf){
		.image = image,
		.buffer = kho_scratch,
		.bufsz = scratch_size,
		.mem = KEXEC_BUF_MEM_UNKNOWN,
		.memsz = scratch_size,
		.buf_align = SZ_64K, /* Makes it easier to map */
		.buf_max = ULONG_MAX,
		.top_down = true,
	};
	err = kexec_add_buffer(&scratch);
	if (err)
		return err;
	image->kho.scratch = &image->segment[image->nr_segments - 1];

	return 0;
}

static int kho_walk_scratch(struct kexec_buf *kbuf,
			    int (*func)(struct resource *, void *))
{
	int ret = 0;
	int i;

	for (i = 0; i < kho_scratch_cnt; i++) {
		struct resource res = {
			.start = kho_scratch[i].addr,
			.end = kho_scratch[i].addr + kho_scratch[i].size - 1,
		};

		/* Try to fit the kimage into our KHO scratch region */
		ret = func(&res, kbuf);
		if (ret)
			break;
	}

	return ret;
}

int kho_locate_mem_hole(struct kexec_buf *kbuf,
			int (*func)(struct resource *, void *))
{
	int ret;

	/* Let the caller fall back to the regular memory walk */
	if (!kho_enable || kbuf->image->type == KEXEC_TYPE_CRASH)
		return 1;

	ret = kho_walk_scratch(kbuf, func);

	/* (*func)() returns 1 once it has found a fit */
	return ret == 1 ? 0 : -EADDRNOTAVAIL;
}