1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * kexec_handover.c - kexec handover metadata processing 4 * Copyright (C) 2023 Alexander Graf <graf@amazon.com> 5 * Copyright (C) 2025 Microsoft Corporation, Mike Rapoport <rppt@kernel.org> 6 * Copyright (C) 2025 Google LLC, Changyuan Lyu <changyuanl@google.com> 7 * Copyright (C) 2025 Pasha Tatashin <pasha.tatashin@soleen.com> 8 * Copyright (C) 2026 Google LLC, Jason Miu <jasonmiu@google.com> 9 */ 10 11 #define pr_fmt(fmt) "KHO: " fmt 12 13 #include <linux/cleanup.h> 14 #include <linux/cma.h> 15 #include <linux/kmemleak.h> 16 #include <linux/count_zeros.h> 17 #include <linux/kasan.h> 18 #include <linux/kexec.h> 19 #include <linux/kexec_handover.h> 20 #include <linux/kho_radix_tree.h> 21 #include <linux/utsname.h> 22 #include <linux/kho/abi/kexec_handover.h> 23 #include <linux/kho/abi/kexec_metadata.h> 24 #include <linux/libfdt.h> 25 #include <linux/list.h> 26 #include <linux/memblock.h> 27 #include <linux/page-isolation.h> 28 #include <linux/unaligned.h> 29 #include <linux/vmalloc.h> 30 31 #include <asm/early_ioremap.h> 32 33 /* 34 * KHO is tightly coupled with mm init and needs access to some of mm 35 * internal APIs. 36 */ 37 #include "../../mm/internal.h" 38 #include "../kexec_internal.h" 39 #include "kexec_handover_internal.h" 40 41 /* The magic token for preserved pages */ 42 #define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */ 43 44 /* 45 * KHO uses page->private, which is an unsigned long, to store page metadata. 46 * Use it to store both the magic and the order. 
 */
union kho_page_info {
	unsigned long page_private;
	struct {
		unsigned int order;	/* page order of the preserved block */
		unsigned int magic;	/* KHO_PAGE_MAGIC on a preserved head page */
	};
};

/* The union must alias page->private exactly, with no padding. */
static_assert(sizeof(union kho_page_info) == sizeof(((struct page *)0)->private));

static bool kho_enable __ro_after_init = IS_ENABLED(CONFIG_KEXEC_HANDOVER_ENABLE_DEFAULT);

bool kho_is_enabled(void)
{
	return kho_enable;
}
EXPORT_SYMBOL_GPL(kho_is_enabled);

/* Parse the "kho=" boot parameter (boolean on/off). */
static int __init kho_parse_enable(char *p)
{
	return kstrtobool(p, &kho_enable);
}
early_param("kho", kho_parse_enable);

/* Outgoing (serialization) state handed to the next kernel. */
struct kho_out {
	void *fdt;		/* root KHO FDT blob */
	struct mutex lock;	/* protects KHO FDT */

	struct kho_radix_tree radix_tree;	/* preserved-page index */
	struct kho_debugfs dbg;
};

static struct kho_out kho_out = {
	.lock = __MUTEX_INITIALIZER(kho_out.lock),
	.radix_tree = {
		.lock = __MUTEX_INITIALIZER(kho_out.radix_tree.lock),
	},
};

/**
 * kho_radix_encode_key - Encodes a physical address and order into a radix key.
 * @phys: The physical address of the page.
 * @order: The order of the page.
 *
 * This function combines a page's physical address and its order into a
 * single unsigned long, which is used as a key for all radix tree
 * operations. The order is encoded as the position of the highest set bit
 * (at KHO_ORDER_0_LOG2 - order), with the shifted PFN in the low bits.
 *
 * Return: The encoded unsigned long radix key.
 */
static unsigned long kho_radix_encode_key(phys_addr_t phys, unsigned int order)
{
	/* Order bits part */
	unsigned long h = 1UL << (KHO_ORDER_0_LOG2 - order);
	/* Shifted physical address part */
	unsigned long l = phys >> (PAGE_SHIFT + order);

	return h | l;
}

/**
 * kho_radix_decode_key - Decodes a radix key back into a physical address and order.
 * @key: The unsigned long key to decode.
 * @order: An output parameter, a pointer to an unsigned int where the decoded
 *         page order will be stored.
 *
 * This function reverses the encoding performed by kho_radix_encode_key(),
 * extracting the original physical address and page order from a given key.
 *
 * Return: The decoded physical address.
 */
static phys_addr_t kho_radix_decode_key(unsigned long key, unsigned int *order)
{
	unsigned int order_bit = fls64(key);
	phys_addr_t phys;

	/* order_bit is numbered starting at 1 from fls64 */
	*order = KHO_ORDER_0_LOG2 - order_bit + 1;
	/* The order marker bit is discarded by the left shift */
	phys = key << (PAGE_SHIFT + *order);

	return phys;
}

/* Index of @key's bit within a level-0 leaf bitmap. */
static unsigned long kho_radix_get_bitmap_index(unsigned long key)
{
	return key % (1 << KHO_BITMAP_SIZE_LOG2);
}

/* Index of @key's slot within an intermediate table node at @level (>= 1). */
static unsigned long kho_radix_get_table_index(unsigned long key,
					       unsigned int level)
{
	int s;

	/* Skip the bitmap bits, then KHO_TABLE_SIZE_LOG2 bits per level. */
	s = ((level - 1) * KHO_TABLE_SIZE_LOG2) + KHO_BITMAP_SIZE_LOG2;
	return (key >> s) % (1 << KHO_TABLE_SIZE_LOG2);
}

/**
 * kho_radix_add_page - Marks a page as preserved in the radix tree.
 * @tree: The KHO radix tree.
 * @pfn: The page frame number of the page to preserve.
 * @order: The order of the page.
 *
 * This function traverses the radix tree based on the key derived from @pfn
 * and @order. It sets the corresponding bit in the leaf bitmap to mark the
 * page for preservation. If intermediate nodes do not exist along the path,
 * they are allocated and added to the tree.
 *
 * May sleep (allocates with GFP_KERNEL, takes @tree->lock).
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int kho_radix_add_page(struct kho_radix_tree *tree,
		       unsigned long pfn, unsigned int order)
{
	/* Newly allocated nodes for error cleanup */
	struct kho_radix_node *intermediate_nodes[KHO_TREE_MAX_DEPTH] = { 0 };
	unsigned long key = kho_radix_encode_key(PFN_PHYS(pfn), order);
	struct kho_radix_node *anchor_node = NULL;
	struct kho_radix_node *node = tree->root;
	struct kho_radix_node *new_node;
	unsigned int i, idx, anchor_idx;
	struct kho_radix_leaf *leaf;
	int err = 0;

	if (WARN_ON_ONCE(!tree->root))
		return -EINVAL;

	might_sleep();

	guard(mutex)(&tree->lock);

	/* Go from high levels to low levels */
	for (i = KHO_TREE_MAX_DEPTH - 1; i > 0; i--) {
		idx = kho_radix_get_table_index(key, i);

		if (node->table[idx]) {
			/* Tables link by physical address; convert to walk. */
			node = phys_to_virt(node->table[idx]);
			continue;
		}

		/* Next node is empty, create a new node for it */
		new_node = (struct kho_radix_node *)get_zeroed_page(GFP_KERNEL);
		if (!new_node) {
			err = -ENOMEM;
			goto err_free_nodes;
		}

		node->table[idx] = virt_to_phys(new_node);

		/*
		 * Capture the node where the new branch starts for cleanup
		 * if allocation fails. anchor_idx is only read when
		 * anchor_node is set, so it is never used uninitialized.
		 */
		if (!anchor_node) {
			anchor_node = node;
			anchor_idx = idx;
		}
		intermediate_nodes[i] = new_node;

		node = new_node;
	}

	/* Handle the leaf level bitmap (level 0) */
	idx = kho_radix_get_bitmap_index(key);
	leaf = (struct kho_radix_leaf *)node;
	/* Lock-protected; non-atomic __set_bit is sufficient. */
	__set_bit(idx, leaf->bitmap);

	return 0;

err_free_nodes:
	/* Free every node allocated on this call, then detach the branch. */
	for (i = KHO_TREE_MAX_DEPTH - 1; i > 0; i--) {
		if (intermediate_nodes[i])
			free_page((unsigned long)intermediate_nodes[i]);
	}
	if (anchor_node)
		anchor_node->table[anchor_idx] = 0;

	return err;
}
EXPORT_SYMBOL_GPL(kho_radix_add_page);

/**
 * kho_radix_del_page - Removes a page's preservation status from the radix tree.
 * @tree: The KHO radix tree.
 * @pfn: The page frame number of the page to unpreserve.
 * @order: The order of the page.
 *
 * This function traverses the radix tree and clears the bit corresponding to
 * the page, effectively removing its "preserved" status. It does not free
 * the tree's intermediate nodes, even if they become empty.
 */
void kho_radix_del_page(struct kho_radix_tree *tree, unsigned long pfn,
			unsigned int order)
{
	unsigned long key = kho_radix_encode_key(PFN_PHYS(pfn), order);
	struct kho_radix_node *node = tree->root;
	struct kho_radix_leaf *leaf;
	unsigned int i, idx;

	if (WARN_ON_ONCE(!tree->root))
		return;

	might_sleep();

	guard(mutex)(&tree->lock);

	/* Go from high levels to low levels */
	for (i = KHO_TREE_MAX_DEPTH - 1; i > 0; i--) {
		idx = kho_radix_get_table_index(key, i);

		/*
		 * Attempting to delete a page that has not been preserved,
		 * return with a warning.
		 */
		if (WARN_ON(!node->table[idx]))
			return;

		node = phys_to_virt(node->table[idx]);
	}

	/* Handle the leaf level bitmap (level 0) */
	leaf = (struct kho_radix_leaf *)node;
	idx = kho_radix_get_bitmap_index(key);
	__clear_bit(idx, leaf->bitmap);
}
EXPORT_SYMBOL_GPL(kho_radix_del_page);

/*
 * Invoke @cb for every bit set in a level-0 leaf bitmap. @key holds the
 * high bits of the radix key; the bit index supplies the low bits.
 * Stops and returns the callback's error on first non-zero return.
 */
static int kho_radix_walk_leaf(struct kho_radix_leaf *leaf,
			       unsigned long key,
			       kho_radix_tree_walk_callback_t cb)
{
	unsigned long *bitmap = (unsigned long *)leaf;
	unsigned int order;
	phys_addr_t phys;
	unsigned int i;
	int err;

	/* A leaf is one page of bitmap: PAGE_SIZE * BITS_PER_BYTE bits. */
	for_each_set_bit(i, bitmap, PAGE_SIZE * BITS_PER_BYTE) {
		phys = kho_radix_decode_key(key | i, &order);
		err = cb(phys, order);
		if (err)
			return err;
	}

	return 0;
}

/*
 * Recursive depth-first walk of the radix tree. @start accumulates the
 * key bits contributed by the levels above @level.
 */
static int __kho_radix_walk_tree(struct kho_radix_node *root,
				 unsigned int level, unsigned long start,
				 kho_radix_tree_walk_callback_t cb)
{
	struct kho_radix_node *node;
	struct kho_radix_leaf *leaf;
	unsigned long key, i;
	unsigned int shift;
	int err;

	/* A table node holds PAGE_SIZE / sizeof(phys_addr_t) slots. */
	for (i = 0; i < PAGE_SIZE / sizeof(phys_addr_t); i++) {
		if (!root->table[i])
			continue;

		shift = ((level - 1) * KHO_TABLE_SIZE_LOG2) +
			KHO_BITMAP_SIZE_LOG2;
		key = start | (i << shift);

		node = phys_to_virt(root->table[i]);

		if (level == 1) {
			/*
			 * we are at level 1,
			 * node is pointing to the level 0 bitmap.
			 */
			leaf = (struct kho_radix_leaf *)node;
			err = kho_radix_walk_leaf(leaf, key, cb);
		} else {
			err = __kho_radix_walk_tree(node, level - 1,
						    key, cb);
		}

		if (err)
			return err;
	}

	return 0;
}

/**
 * kho_radix_walk_tree - Traverses the radix tree and calls a callback for each preserved page.
 * @tree: A pointer to the KHO radix tree to walk.
 * @cb: A callback function of type kho_radix_tree_walk_callback_t that will be
 *      invoked for each preserved page found in the tree. The callback receives
 *      the physical address and order of the preserved page.
 *
 * This function walks the radix tree, searching from the specified top level
 * down to the lowest level (level 0). For each preserved page found, it invokes
 * the provided callback, passing the page's physical address and order.
 *
 * Return: 0 if the walk completed the specified tree, or the non-zero return
 * value from the callback that stopped the walk.
 */
int kho_radix_walk_tree(struct kho_radix_tree *tree,
			kho_radix_tree_walk_callback_t cb)
{
	if (WARN_ON_ONCE(!tree->root))
		return -EINVAL;

	guard(mutex)(&tree->lock);

	return __kho_radix_walk_tree(tree->root, KHO_TREE_MAX_DEPTH - 1, 0, cb);
}
EXPORT_SYMBOL_GPL(kho_radix_walk_tree);

/*
 * Unpreserve the PFN range [@pfn, @end_pfn), splitting it into the same
 * greedy aligned power-of-two blocks that the preserve side produces.
 */
static void __kho_unpreserve(struct kho_radix_tree *tree,
			     unsigned long pfn, unsigned long end_pfn)
{
	unsigned int order;

	while (pfn < end_pfn) {
		/* Largest aligned order that still fits in the range. */
		order = min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));

		kho_radix_del_page(tree, pfn, order);

		pfn += 1 << order;
	}
}

/* For physically contiguous 0-order pages. */
static void kho_init_pages(struct page *page, unsigned long nr_pages)
{
	for (unsigned long i = 0; i < nr_pages; i++) {
		set_page_count(page + i, 1);
		/* Clear each page's codetag to avoid accounting mismatch. */
		clear_page_tag_ref(page + i);
	}
}

/* Re-initialize struct pages of a preserved block as a single folio. */
static void kho_init_folio(struct page *page, unsigned int order)
{
	unsigned long nr_pages = (1 << order);

	/* Head page gets refcount of 1. */
	set_page_count(page, 1);
	/* Clear head page's codetag to avoid accounting mismatch. */
	clear_page_tag_ref(page);

	/* For higher order folios, tail pages get a page count of zero. */
	for (unsigned long i = 1; i < nr_pages; i++)
		set_page_count(page + i, 0);

	if (order > 0)
		prep_compound_page(page, order);
}

/*
 * Common restore path: validate the preservation magic stored in
 * page->private and re-initialize the struct pages, either as a folio
 * (@is_folio) or as independent order-0 pages.
 */
static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
{
	struct page *page = pfn_to_online_page(PHYS_PFN(phys));
	unsigned long nr_pages;
	union kho_page_info info;

	if (!page)
		return NULL;

	info.page_private = page->private;
	/*
	 * The magic is only set on the head page of a preserved block (see
	 * kho_preserved_memory_reserve()). This magic check also implicitly
	 * makes sure phys is order-aligned since for non-order-aligned phys
	 * addresses, magic will never be set.
	 */
	if (WARN_ON_ONCE(info.magic != KHO_PAGE_MAGIC))
		return NULL;
	nr_pages = (1 << info.order);

	/* Clear private to make sure later restores on this page error out. */
	page->private = 0;

	if (is_folio)
		kho_init_folio(page, info.order);
	else
		kho_init_pages(page, nr_pages);

	adjust_managed_page_count(page, nr_pages);
	return page;
}

/**
 * kho_restore_folio - recreates the folio from the preserved memory.
 * @phys: physical address of the folio.
 *
 * Return: pointer to the struct folio on success, NULL on failure.
 */
struct folio *kho_restore_folio(phys_addr_t phys)
{
	struct page *page = kho_restore_page(phys, true);

	return page ? page_folio(page) : NULL;
}
EXPORT_SYMBOL_GPL(kho_restore_folio);

/**
 * kho_restore_pages - restore list of contiguous order 0 pages.
 * @phys: physical address of the first page.
 * @nr_pages: number of pages.
 *
 * Restore a contiguous list of order 0 pages that was preserved with
 * kho_preserve_pages().
 *
 * Return: the first page on success, NULL on failure.
 */
struct page *kho_restore_pages(phys_addr_t phys, unsigned long nr_pages)
{
	const unsigned long start_pfn = PHYS_PFN(phys);
	const unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn = start_pfn;

	while (pfn < end_pfn) {
		/*
		 * Recompute the block order from alignment, mirroring the
		 * split done by kho_preserve_pages().
		 *
		 * NOTE(review): kho_preserve_pages() may additionally lower
		 * the order at NUMA node boundaries; here the stride is
		 * derived from alignment only, while kho_restore_page()
		 * initializes pages based on the recorded (possibly smaller)
		 * order. Confirm these cannot diverge for node-spanning
		 * ranges.
		 */
		const unsigned int order =
			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
		struct page *page = kho_restore_page(PFN_PHYS(pfn), false);

		if (!page)
			return NULL;
		pfn += 1 << order;
	}

	return pfn_to_page(start_pfn);
}
EXPORT_SYMBOL_GPL(kho_restore_pages);

/*
 * Walk callback used at early boot: reserve a preserved block in memblock
 * and stamp its head page with the KHO magic/order for later restore.
 * Always returns 0 so the walk visits every preserved block.
 */
static int __init kho_preserved_memory_reserve(phys_addr_t phys,
					       unsigned int order)
{
	union kho_page_info info;
	struct page *page;
	u64 sz;

	/*
	 * NOTE(review): "1" is a signed int, so this shift overflows if
	 * order + PAGE_SHIFT >= 31; 1ULL << ... would be safer even though
	 * current order limits keep the value small.
	 */
	sz = 1 << (order + PAGE_SHIFT);
	page = phys_to_page(phys);

	/* Reserve the memory preserved in KHO in memblock */
	memblock_reserve(phys, sz);
	memblock_reserved_mark_noinit(phys, sz);
	info.magic = KHO_PAGE_MAGIC;
	info.order = order;
	page->private = info.page_private;

	return 0;
}

/* Returns physical address of the preserved memory map from FDT */
static phys_addr_t __init kho_get_mem_map_phys(const void *fdt)
{
	const void *mem_ptr;
	int len;

	mem_ptr = fdt_getprop(fdt, 0, KHO_FDT_MEMORY_MAP_PROP_NAME, &len);
	if (!mem_ptr || len != sizeof(u64)) {
		pr_err("failed to get preserved memory map\n");
		return 0;
	}

	/* Property data in the FDT is not guaranteed to be aligned. */
	return get_unaligned((const u64 *)mem_ptr);
}

/*
 * With KHO enabled, memory can become fragmented because KHO regions may
 * be anywhere in physical address space. The scratch regions give us a
 * safe zones that we will never see KHO allocations from. This is where we
 * can later safely load our new kexec images into and then use the scratch
 * area for early allocations that happen before page allocator is
 * initialized.
 */
struct kho_scratch *kho_scratch;
unsigned int kho_scratch_cnt;

/*
 * The scratch areas are scaled by default as percent of memory allocated from
 * memblock. A user can override the scale with command line parameter:
 *
 * kho_scratch=N%
 *
 * It is also possible to explicitly define size for a lowmem, a global and
 * per-node scratch areas:
 *
 * kho_scratch=l[KMG],n[KMG],m[KMG]
 *
 * The explicit size definition takes precedence over scale definition.
 */
static unsigned int scratch_scale __initdata = 200;
static phys_addr_t scratch_size_global __initdata;
static phys_addr_t scratch_size_pernode __initdata;
static phys_addr_t scratch_size_lowmem __initdata;

/*
 * Parse the "kho_scratch=" boot parameter: either a percentage ("N%",
 * stored in scratch_scale) or three explicit sizes
 * "lowmem[KMG],global[KMG],pernode[KMG]" (which clear scratch_scale).
 */
static int __init kho_parse_scratch_size(char *p)
{
	size_t len;
	unsigned long sizes[3];
	size_t total_size = 0;
	int i;

	if (!p)
		return -EINVAL;

	len = strlen(p);
	if (!len)
		return -EINVAL;

	/* parse nn% */
	if (p[len - 1] == '%') {
		/* unsigned int max is 4,294,967,295, 10 chars */
		char s_scale[11] = {};
		int ret = 0;

		if (len > ARRAY_SIZE(s_scale))
			return -EINVAL;

		/* Copy without the trailing '%'; buffer stays NUL-terminated. */
		memcpy(s_scale, p, len - 1);
		ret = kstrtouint(s_scale, 10, &scratch_scale);
		if (!ret)
			pr_notice("scratch scale is %d%%\n", scratch_scale);
		return ret;
	}

	/* parse ll[KMG],mm[KMG],nn[KMG] */
	for (i = 0; i < ARRAY_SIZE(sizes); i++) {
		char *endp = p;

		if (i > 0) {
			if (*p != ',')
				return -EINVAL;
			p += 1;
		}

		sizes[i] = memparse(p, &endp);
		/* memparse consumed nothing -> malformed number */
		if (endp == p)
			return -EINVAL;
		p = endp;
		total_size += sizes[i];
	}

	if (!total_size)
		return -EINVAL;

	/* The string should be fully consumed by now. */
	if (*p)
		return -EINVAL;

	scratch_size_lowmem = sizes[0];
	scratch_size_global = sizes[1];
	scratch_size_pernode = sizes[2];
	/* Explicit sizes take precedence: disable percentage scaling. */
	scratch_scale = 0;

	/* NOTE(review): last specifier is %lld while the others are %llu. */
	pr_notice("scratch areas: lowmem: %lluMiB global: %lluMiB pernode: %lldMiB\n",
		  (u64)(scratch_size_lowmem >> 20),
		  (u64)(scratch_size_global >> 20),
		  (u64)(scratch_size_pernode >> 20));

	return 0;
}
early_param("kho_scratch", kho_parse_scratch_size);

/*
 * Recompute the lowmem and global scratch sizes from the percentage scale,
 * based on how much memory memblock has handed to the kernel. No-op when
 * explicit sizes were given on the command line (scratch_scale == 0).
 */
static void __init scratch_size_update(void)
{
	phys_addr_t size;

	if (!scratch_scale)
		return;

	size = memblock_reserved_kern_size(ARCH_LOW_ADDRESS_LIMIT,
					   NUMA_NO_NODE);
	size = size * scratch_scale / 100;
	scratch_size_lowmem = round_up(size, CMA_MIN_ALIGNMENT_BYTES);

	size = memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE,
					   NUMA_NO_NODE);
	/* Global scratch covers the remainder not already counted as lowmem. */
	size = size * scratch_scale / 100 - scratch_size_lowmem;
	scratch_size_global = round_up(size, CMA_MIN_ALIGNMENT_BYTES);
}

/* Scratch size for NUMA node @nid: scaled from its memory, or the fixed value. */
static phys_addr_t __init scratch_size_node(int nid)
{
	phys_addr_t size;

	if (scratch_scale) {
		size = memblock_reserved_kern_size(MEMBLOCK_ALLOC_ANYWHERE,
						   nid);
		size = size * scratch_scale / 100;
	} else {
		size = scratch_size_pernode;
	}

	return round_up(size, CMA_MIN_ALIGNMENT_BYTES);
}

/**
 * kho_reserve_scratch - Reserve a contiguous chunk of memory for kexec
 *
 * With KHO we can preserve arbitrary pages in the system. To ensure we still
 * have a large contiguous region of memory when we search the physical address
 * space for target memory, let's make sure we always have a large CMA region
 * active. This CMA region will only be used for movable pages which are not a
 * problem for us during KHO because we can just move them somewhere else.
 *
 * On any allocation failure all already-reserved areas are released and KHO
 * is disabled (kho_enable cleared).
 */
static void __init kho_reserve_scratch(void)
{
	phys_addr_t addr, size;
	int nid, i = 0;

	if (!kho_enable)
		return;

	scratch_size_update();

	/* FIXME: deal with node hot-plug/remove */
	/* One area per memory node, plus the lowmem and global areas. */
	kho_scratch_cnt = nodes_weight(node_states[N_MEMORY]) + 2;
	size = kho_scratch_cnt * sizeof(*kho_scratch);
	kho_scratch = memblock_alloc(size, PAGE_SIZE);
	if (!kho_scratch) {
		pr_err("Failed to reserve scratch array\n");
		goto err_disable_kho;
	}

	/*
	 * reserve scratch area in low memory for lowmem allocations in the
	 * next kernel
	 */
	size = scratch_size_lowmem;
	addr = memblock_phys_alloc_range(size, CMA_MIN_ALIGNMENT_BYTES, 0,
					 ARCH_LOW_ADDRESS_LIMIT);
	if (!addr) {
		pr_err("Failed to reserve lowmem scratch buffer\n");
		goto err_free_scratch_desc;
	}

	kho_scratch[i].addr = addr;
	kho_scratch[i].size = size;
	i++;

	/* reserve large contiguous area for allocations without nid */
	size = scratch_size_global;
	addr = memblock_phys_alloc(size, CMA_MIN_ALIGNMENT_BYTES);
	if (!addr) {
		pr_err("Failed to reserve global scratch buffer\n");
		goto err_free_scratch_areas;
	}

	kho_scratch[i].addr = addr;
	kho_scratch[i].size = size;
	i++;

	/*
	 * Loop over nodes that have both memory and are online. Skip
	 * memoryless nodes, as we can not allocate scratch areas there.
	 */
	for_each_node_state(nid, N_MEMORY) {
		size = scratch_size_node(nid);
		addr = memblock_alloc_range_nid(size, CMA_MIN_ALIGNMENT_BYTES,
						0, MEMBLOCK_ALLOC_ACCESSIBLE,
						nid, true);
		if (!addr) {
			pr_err("Failed to reserve nid %d scratch buffer\n", nid);
			goto err_free_scratch_areas;
		}

		kho_scratch[i].addr = addr;
		kho_scratch[i].size = size;
		i++;
	}

	return;

err_free_scratch_areas:
	/* Release every area reserved so far, in reverse order. */
	for (i--; i >= 0; i--)
		memblock_phys_free(kho_scratch[i].addr, kho_scratch[i].size);
err_free_scratch_desc:
	memblock_free(kho_scratch, kho_scratch_cnt * sizeof(*kho_scratch));
err_disable_kho:
	pr_warn("Failed to reserve scratch area, disabling kexec handover\n");
	kho_enable = false;
}

/**
 * kho_add_subtree - record the physical address of a sub blob in KHO root tree.
 * @name: name of the sub tree.
 * @blob: the sub tree blob.
 * @size: size of the blob in bytes.
 *
 * Creates a new child node named @name in KHO root FDT and records
 * the physical address of @blob. The pages of @blob must also be preserved
 * by KHO for the new kernel to retrieve it after kexec.
 *
 * A debugfs blob entry is also created at
 * ``/sys/kernel/debug/kho/out/sub_fdts/@name`` when kernel is configured with
 * CONFIG_KEXEC_HANDOVER_DEBUGFS
 *
 * Return: 0 on success, error code on failure
 */
int kho_add_subtree(const char *name, void *blob, size_t size)
{
	phys_addr_t phys = virt_to_phys(blob);
	void *root_fdt = kho_out.fdt;
	u64 size_u64 = size;
	int err = -ENOMEM;
	int off, fdt_err;

	guard(mutex)(&kho_out.lock);

	/* Reopen the packed FDT with PAGE_SIZE of room for the new node. */
	fdt_err = fdt_open_into(root_fdt, root_fdt, PAGE_SIZE);
	if (fdt_err < 0)
		return err;

	off = fdt_add_subnode(root_fdt, 0, name);
	if (off < 0) {
		if (off == -FDT_ERR_EXISTS)
			err = -EEXIST;
		goto out_pack;
	}

	err = fdt_setprop(root_fdt, off, KHO_SUB_TREE_PROP_NAME,
			  &phys, sizeof(phys));
	if (err < 0)
		goto out_pack;

	err = fdt_setprop(root_fdt, off, KHO_SUB_TREE_SIZE_PROP_NAME,
			  &size_u64, sizeof(size_u64));
	if (err < 0)
		goto out_pack;

	/* Debugfs exposure is best-effort; warn but do not fail the add. */
	WARN_ON_ONCE(kho_debugfs_blob_add(&kho_out.dbg, name, blob,
					  size, false));

out_pack:
	/* Repack on both success (err == 0) and failure paths. */
	fdt_pack(root_fdt);

	return err;
}
EXPORT_SYMBOL_GPL(kho_add_subtree);

/*
 * Remove the root-FDT child node whose recorded physical address matches
 * @blob (the counterpart of kho_add_subtree()), along with its debugfs
 * entry. Silently does nothing if no matching node exists.
 */
void kho_remove_subtree(void *blob)
{
	phys_addr_t target_phys = virt_to_phys(blob);
	void *root_fdt = kho_out.fdt;
	int off;
	int err;

	guard(mutex)(&kho_out.lock);

	err = fdt_open_into(root_fdt, root_fdt, PAGE_SIZE);
	if (err < 0)
		return;

	for (off = fdt_first_subnode(root_fdt, 0); off >= 0;
	     off = fdt_next_subnode(root_fdt, off)) {
		const u64 *val;
		int len;

		/*
		 * NOTE(review): the length check uses sizeof(phys_addr_t)
		 * (matching what kho_add_subtree() stores) but the data is
		 * read through a u64 pointer; confirm this is safe on
		 * configurations where phys_addr_t is 32-bit.
		 */
		val = fdt_getprop(root_fdt, off, KHO_SUB_TREE_PROP_NAME, &len);
		if (!val || len != sizeof(phys_addr_t))
			continue;

		if ((phys_addr_t)*val == target_phys) {
			fdt_del_node(root_fdt, off);
			kho_debugfs_blob_remove(&kho_out.dbg, blob);
			break;
		}
	}

	fdt_pack(root_fdt);
}
EXPORT_SYMBOL_GPL(kho_remove_subtree);

/**
 * kho_preserve_folio - preserve a folio across kexec.
 * @folio: folio to preserve.
 *
 * Instructs KHO to preserve the whole folio across kexec. The order
 * will be preserved as well.
 *
 * Return: 0 on success, error code on failure
 */
int kho_preserve_folio(struct folio *folio)
{
	struct kho_radix_tree *tree = &kho_out.radix_tree;
	const unsigned long pfn = folio_pfn(folio);
	const unsigned int order = folio_order(folio);

	/* Scratch areas must stay free of preservations. */
	if (WARN_ON(kho_scratch_overlap(pfn << PAGE_SHIFT, PAGE_SIZE << order)))
		return -EINVAL;

	return kho_radix_add_page(tree, pfn, order);
}
EXPORT_SYMBOL_GPL(kho_preserve_folio);

/**
 * kho_unpreserve_folio - unpreserve a folio.
 * @folio: folio to unpreserve.
 *
 * Instructs KHO to unpreserve a folio that was preserved by
 * kho_preserve_folio() before. The provided @folio (pfn and order)
 * must exactly match a previously preserved folio.
 */
void kho_unpreserve_folio(struct folio *folio)
{
	struct kho_radix_tree *tree = &kho_out.radix_tree;
	const unsigned long pfn = folio_pfn(folio);
	const unsigned int order = folio_order(folio);

	kho_radix_del_page(tree, pfn, order);
}
EXPORT_SYMBOL_GPL(kho_unpreserve_folio);

/**
 * kho_preserve_pages - preserve contiguous pages across kexec
 * @page: first page in the list.
 * @nr_pages: number of pages.
 *
 * Preserve a contiguous list of order 0 pages. Must be restored using
 * kho_restore_pages() to ensure the pages are restored properly as order 0.
 *
 * Return: 0 on success, error code on failure
 */
int kho_preserve_pages(struct page *page, unsigned long nr_pages)
{
	struct kho_radix_tree *tree = &kho_out.radix_tree;
	const unsigned long start_pfn = page_to_pfn(page);
	const unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn = start_pfn;
	unsigned long failed_pfn = 0;
	int err = 0;

	/* Scratch areas must stay free of preservations. */
	if (WARN_ON(kho_scratch_overlap(start_pfn << PAGE_SHIFT,
					nr_pages << PAGE_SHIFT))) {
		return -EINVAL;
	}

	while (pfn < end_pfn) {
		/* Largest aligned power-of-two block that fits the range. */
		unsigned int order =
			min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));

		/*
		 * Make sure all the pages in a single preservation are in the
		 * same NUMA node. The restore machinery can not cope with a
		 * preservation spanning multiple NUMA nodes.
		 */
		while (pfn_to_nid(pfn) != pfn_to_nid(pfn + (1UL << order) - 1))
			order--;

		err = kho_radix_add_page(tree, pfn, order);
		if (err) {
			failed_pfn = pfn;
			break;
		}

		pfn += 1 << order;
	}

	/* Roll back everything preserved before the failing block. */
	if (err)
		__kho_unpreserve(tree, start_pfn, failed_pfn);

	return err;
}
EXPORT_SYMBOL_GPL(kho_preserve_pages);

/**
 * kho_unpreserve_pages - unpreserve contiguous pages.
 * @page: first page in the list.
 * @nr_pages: number of pages.
 *
 * Instructs KHO to unpreserve @nr_pages contiguous pages starting from @page.
 * This must be called with the same @page and @nr_pages as the corresponding
 * kho_preserve_pages() call. Unpreserving arbitrary sub-ranges of larger
 * preserved blocks is not supported.
 */
void kho_unpreserve_pages(struct page *page, unsigned long nr_pages)
{
	struct kho_radix_tree *tree = &kho_out.radix_tree;
	const unsigned long start_pfn = page_to_pfn(page);
	const unsigned long end_pfn = start_pfn + nr_pages;

	__kho_unpreserve(tree, start_pfn, end_pfn);
}
EXPORT_SYMBOL_GPL(kho_unpreserve_pages);

/* vmalloc flags KHO supports */
#define KHO_VMALLOC_SUPPORTED_FLAGS	(VM_ALLOC | VM_ALLOW_HUGE_VMAP)

/* KHO internal flags for vmalloc preservations */
#define KHO_VMALLOC_ALLOC	0x0001
#define KHO_VMALLOC_HUGE_VMAP	0x0002

/* Translate vm_struct flags into the stable KHO on-wire flag bits. */
static unsigned short vmalloc_flags_to_kho(unsigned int vm_flags)
{
	unsigned short kho_flags = 0;

	if (vm_flags & VM_ALLOC)
		kho_flags |= KHO_VMALLOC_ALLOC;
	if (vm_flags & VM_ALLOW_HUGE_VMAP)
		kho_flags |= KHO_VMALLOC_HUGE_VMAP;

	return kho_flags;
}

/* Inverse of vmalloc_flags_to_kho(). */
static unsigned int kho_flags_to_vmalloc(unsigned short kho_flags)
{
	unsigned int vm_flags = 0;

	if (kho_flags & KHO_VMALLOC_ALLOC)
		vm_flags |= VM_ALLOC;
	if (kho_flags & KHO_VMALLOC_HUGE_VMAP)
		vm_flags |= VM_ALLOW_HUGE_VMAP;

	return vm_flags;
}

/*
 * Allocate, zero and self-preserve one chunk of the vmalloc preservation
 * list, linking it after @cur when @cur is non-NULL.
 * Returns NULL on allocation or preservation failure.
 */
static struct kho_vmalloc_chunk *new_vmalloc_chunk(struct kho_vmalloc_chunk *cur)
{
	struct kho_vmalloc_chunk *chunk;
	int err;

	chunk = (struct kho_vmalloc_chunk *)get_zeroed_page(GFP_KERNEL);
	if (!chunk)
		return NULL;

	/* The chunk itself must survive kexec too. */
	err = kho_preserve_pages(virt_to_page(chunk), 1);
	if (err)
		goto err_free;
	if (cur)
		KHOSER_STORE_PTR(cur->hdr.next, chunk);
	return chunk;

err_free:
	free_page((unsigned long)chunk);
	return NULL;
}

/*
 * Unpreserve one chunk and every page block it records. @order is the
 * per-block page order stored in the kho_vmalloc descriptor.
 */
static void kho_vmalloc_unpreserve_chunk(struct kho_vmalloc_chunk *chunk,
					 unsigned short order)
{
	struct kho_radix_tree *tree = &kho_out.radix_tree;
	unsigned long pfn = PHYS_PFN(virt_to_phys(chunk));

	/* First drop the chunk page's own preservation. */
	__kho_unpreserve(tree, pfn, pfn + 1);

	/* phys[] is zero-terminated: stop at the first empty slot. */
	for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
		pfn = PHYS_PFN(chunk->phys[i]);
		__kho_unpreserve(tree, pfn, pfn + (1 << order));
	}
}

/**
 * kho_preserve_vmalloc - preserve memory allocated with vmalloc() across kexec
 * @ptr: pointer to the area in vmalloc address space
 * @preservation: placeholder for preservation metadata
 *
 * Instructs KHO to preserve the area in vmalloc address space at @ptr. The
 * physical pages mapped at @ptr will be preserved and on successful return
 * @preservation will hold the physical address of a structure that describes
 * the preservation.
 *
 * NOTE: The memory allocated with vmalloc_node() variants cannot be reliably
 * restored on the same node
 *
 * Return: 0 on success, error code on failure
 */
int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation)
{
	struct kho_vmalloc_chunk *chunk;
	struct vm_struct *vm = find_vm_area(ptr);
	unsigned int order, flags, nr_contig_pages;
	unsigned int idx = 0;
	int err;

	if (!vm)
		return -EINVAL;

	if (vm->flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
		return -EOPNOTSUPP;

	flags = vmalloc_flags_to_kho(vm->flags);
	order = get_vm_area_page_order(vm);

	chunk = new_vmalloc_chunk(NULL);
	if (!chunk)
		return -ENOMEM;
	KHOSER_STORE_PTR(preservation->first, chunk);

	/* Huge-vmap areas are built from (1 << order)-page contiguous blocks. */
	nr_contig_pages = (1 << order);
	for (int i = 0; i < vm->nr_pages; i += nr_contig_pages) {
		phys_addr_t phys = page_to_phys(vm->pages[i]);

		err = kho_preserve_pages(vm->pages[i], nr_contig_pages);
		if (err)
			goto err_free;

		chunk->phys[idx++] = phys;
		/* Current chunk full: chain a new one and restart the index. */
		if (idx == ARRAY_SIZE(chunk->phys)) {
			chunk = new_vmalloc_chunk(chunk);
			if (!chunk) {
				err = -ENOMEM;
				goto err_free;
			}
			idx = 0;
		}
	}

	preservation->total_pages = vm->nr_pages;
	preservation->flags = flags;
	preservation->order = order;

	return 0;

err_free:
	/* Unwinds every chunk and page block recorded so far. */
	kho_unpreserve_vmalloc(preservation);
	return err;
}
EXPORT_SYMBOL_GPL(kho_preserve_vmalloc);

/**
 * kho_unpreserve_vmalloc - unpreserve memory allocated with vmalloc()
 * @preservation: preservation metadata returned by kho_preserve_vmalloc()
 *
 * Instructs KHO to unpreserve the area in vmalloc address space that was
 * previously preserved with kho_preserve_vmalloc().
 */
void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation)
{
	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first);

	while (chunk) {
		struct kho_vmalloc_chunk *tmp = chunk;

		kho_vmalloc_unpreserve_chunk(chunk, preservation->order);

		/* Read the next link before freeing the current chunk. */
		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
		free_page((unsigned long)tmp);
	}
}
EXPORT_SYMBOL_GPL(kho_unpreserve_vmalloc);

/**
 * kho_restore_vmalloc - recreates and populates an area in vmalloc address
 * space from the preserved memory.
 * @preservation: preservation metadata.
 *
 * Recreates an area in vmalloc address space and populates it with memory that
 * was preserved using kho_preserve_vmalloc().
 *
 * Return: pointer to the area in the vmalloc address space, NULL on failure.
 */
void *kho_restore_vmalloc(const struct kho_vmalloc *preservation)
{
	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first);
	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_PROT_NORMAL;
	unsigned int align, order, shift, vm_flags;
	unsigned long total_pages, contig_pages;
	unsigned long addr, size;
	struct vm_struct *area;
	struct page **pages;
	unsigned int idx = 0;
	int err;

	vm_flags = kho_flags_to_vmalloc(preservation->flags);
	if (vm_flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
		return NULL;

	total_pages = preservation->total_pages;
	pages = kvmalloc_objs(*pages, total_pages);
	if (!pages)
		return NULL;
	order = preservation->order;
	contig_pages = (1 << order);
	shift = PAGE_SHIFT + order;
	align = 1 << shift;

	while (chunk) {
		struct page *page;

		/* phys[] is zero-terminated: stop at the first empty slot. */
		for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
			phys_addr_t phys = chunk->phys[i];

			/* Guard against corrupt metadata overflowing pages[]. */
			if (idx + contig_pages > total_pages)
				goto err_free_pages_array;

			page = kho_restore_pages(phys, contig_pages);
			if (!page)
				goto err_free_pages_array;

			for (int j = 0; j < contig_pages; j++)
				pages[idx++] = page + j;

			/*
			 * NOTE(review): this increment is dead code — phys is
			 * reloaded from chunk->phys[i] on every iteration.
			 * Consider removing it.
			 */
			phys += contig_pages * PAGE_SIZE;
		}

		/* Restore the chunk page itself, then free it: it is only metadata. */
		page = kho_restore_pages(virt_to_phys(chunk), 1);
		if (!page)
			goto err_free_pages_array;
		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
		__free_page(page);
	}

	/* The chunks must account for exactly total_pages pages. */
	if (idx != total_pages)
		goto err_free_pages_array;

	area = __get_vm_area_node(total_pages * PAGE_SIZE, align, shift,
				  vm_flags | VM_UNINITIALIZED,
				  VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
	if (!area)
		goto err_free_pages_array;

	addr = (unsigned long)area->addr;
	size = get_vm_area_size(area);
	err = vmap_pages_range(addr, addr + size, PAGE_KERNEL, pages, shift);
	if (err)
		goto err_free_vm_area;

	area->nr_pages = total_pages;
	area->pages = pages;

	if (vm_flags & VM_ALLOC)
		kasan_flags |= KASAN_VMALLOC_VM_ALLOC;

	area->addr = kasan_unpoison_vmalloc(area->addr, total_pages * PAGE_SIZE,
					    kasan_flags);
	clear_vm_uninitialized_flag(area);

	return area->addr;

err_free_vm_area:
	free_vm_area(area);
err_free_pages_array:
	kvfree(pages);
	return NULL;
}
EXPORT_SYMBOL_GPL(kho_restore_vmalloc);

/**
 * kho_alloc_preserve - Allocate, zero, and preserve memory.
 * @size: The number of bytes to allocate.
 *
 * Allocates a physically contiguous block of zeroed pages that is large
 * enough to hold @size bytes. The allocated memory is then registered with
 * KHO for preservation across a kexec.
 *
 * Note: The actual allocated size will be rounded up to the nearest
 * power-of-two page boundary.
 *
 * @return A virtual pointer to the allocated and preserved memory on success,
 * or an ERR_PTR() encoded error on failure.
 */
void *kho_alloc_preserve(size_t size)
{
	struct folio *folio;
	int order, ret;

	if (!size)
		return ERR_PTR(-EINVAL);

	order = get_order(size);
	if (order > MAX_PAGE_ORDER)
		return ERR_PTR(-E2BIG);

	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, order);
	if (!folio)
		return ERR_PTR(-ENOMEM);

	ret = kho_preserve_folio(folio);
	if (ret) {
		/* Preservation failed: give the folio back. */
		folio_put(folio);
		return ERR_PTR(ret);
	}

	return folio_address(folio);
}
EXPORT_SYMBOL_GPL(kho_alloc_preserve);

/**
 * kho_unpreserve_free - Unpreserve and free memory.
 * @mem: Pointer to the memory allocated by kho_alloc_preserve().
 *
 * Unregisters the memory from KHO preservation and frees the underlying
 * pages back to the system.
This function should be called to clean up 1230 * memory allocated with kho_alloc_preserve(). 1231 */ 1232 void kho_unpreserve_free(void *mem) 1233 { 1234 struct folio *folio; 1235 1236 if (!mem) 1237 return; 1238 1239 folio = virt_to_folio(mem); 1240 kho_unpreserve_folio(folio); 1241 folio_put(folio); 1242 } 1243 EXPORT_SYMBOL_GPL(kho_unpreserve_free); 1244 1245 /** 1246 * kho_restore_free - Restore and free memory after kexec. 1247 * @mem: Pointer to the memory (in the new kernel's address space) 1248 * that was allocated by the old kernel. 1249 * 1250 * This function is intended to be called in the new kernel (post-kexec) 1251 * to take ownership of and free a memory region that was preserved by the 1252 * old kernel using kho_alloc_preserve(). 1253 * 1254 * It first restores the pages from KHO (using their physical address) 1255 * and then frees the pages back to the new kernel's page allocator. 1256 */ 1257 void kho_restore_free(void *mem) 1258 { 1259 struct folio *folio; 1260 1261 if (!mem) 1262 return; 1263 1264 folio = kho_restore_folio(__pa(mem)); 1265 if (!WARN_ON(!folio)) 1266 folio_put(folio); 1267 } 1268 EXPORT_SYMBOL_GPL(kho_restore_free); 1269 1270 struct kho_in { 1271 phys_addr_t fdt_phys; 1272 phys_addr_t scratch_phys; 1273 char previous_release[__NEW_UTS_LEN + 1]; 1274 u32 kexec_count; 1275 struct kho_debugfs dbg; 1276 }; 1277 1278 static struct kho_in kho_in = { 1279 }; 1280 1281 static const void *kho_get_fdt(void) 1282 { 1283 return kho_in.fdt_phys ? phys_to_virt(kho_in.fdt_phys) : NULL; 1284 } 1285 1286 /** 1287 * is_kho_boot - check if current kernel was booted via KHO-enabled 1288 * kexec 1289 * 1290 * This function checks if the current kernel was loaded through a kexec 1291 * operation with KHO enabled, by verifying that a valid KHO FDT 1292 * was passed. 1293 * 1294 * Note: This function returns reliable results only after 1295 * kho_populate() has been called during early boot. 
Before that,
 * it may return false even if KHO data is present.
 *
 * Return: true if booted via KHO-enabled kexec, false otherwise
 */
bool is_kho_boot(void)
{
	/* A non-zero FDT address is only recorded by kho_populate(). */
	return !!kho_get_fdt();
}
EXPORT_SYMBOL_GPL(is_kho_boot);

/**
 * kho_retrieve_subtree - retrieve a preserved sub blob by its name.
 * @name: the name of the sub blob passed to kho_add_subtree().
 * @phys: if found, the physical address of the sub blob is stored in @phys.
 * @size: if not NULL and found, the size of the sub blob is stored in @size.
 *
 * Retrieve a preserved sub blob named @name and store its physical
 * address in @phys and optionally its size in @size.
 *
 * Return: 0 on success, error code on failure
 */
int kho_retrieve_subtree(const char *name, phys_addr_t *phys, size_t *size)
{
	const void *fdt = kho_get_fdt();
	const u64 *val;
	int offset, len;

	if (!fdt)
		return -ENOENT;

	if (!phys)
		return -EINVAL;

	/* Sub blobs are direct children of the FDT root node. */
	offset = fdt_subnode_offset(fdt, 0, name);
	if (offset < 0)
		return -ENOENT;

	/*
	 * NOTE(review): the u64 property is dereferenced directly, i.e. it is
	 * assumed to be stored native-endian by the producer rather than as a
	 * big-endian fdt64 value — confirm against kho_add_subtree().
	 */
	val = fdt_getprop(fdt, offset, KHO_SUB_TREE_PROP_NAME, &len);
	if (!val || len != sizeof(*val))
		return -EINVAL;

	*phys = (phys_addr_t)*val;

	/* The size property is mandatory; a missing one marks a broken node. */
	val = fdt_getprop(fdt, offset, KHO_SUB_TREE_SIZE_PROP_NAME, &len);
	if (!val || len != sizeof(*val)) {
		pr_warn("broken KHO subnode '%s': missing or invalid blob-size property\n",
			name);
		return -EINVAL;
	}

	if (size)
		*size = (size_t)*val;

	return 0;
}
EXPORT_SYMBOL_GPL(kho_retrieve_subtree);

/* Locate the preserved-memory radix tree in @fdt and reserve its ranges. */
static int __init kho_mem_retrieve(const void *fdt)
{
	struct kho_radix_tree tree;
	const phys_addr_t *mem;
	int len;

	/* Retrieve the KHO radix tree from passed-in FDT.
 */
	mem = fdt_getprop(fdt, 0, KHO_FDT_MEMORY_MAP_PROP_NAME, &len);

	if (!mem || len != sizeof(*mem)) {
		pr_err("failed to get preserved KHO memory tree\n");
		return -ENOENT;
	}

	/* A zero root address means there is nothing valid to walk. */
	if (!*mem)
		return -EINVAL;

	tree.root = phys_to_virt(*mem);
	mutex_init(&tree.lock);
	/* Reserve every preserved range so mm never hands it out. */
	return kho_radix_walk_tree(&tree, kho_preserved_memory_reserve);
}

/* Build the outgoing KHO FDT that the next kernel will parse. */
static __init int kho_out_fdt_setup(void)
{
	struct kho_radix_tree *tree = &kho_out.radix_tree;
	void *root = kho_out.fdt;
	u64 preserved_mem_tree_pa;
	int err;

	/*
	 * libfdt calls return 0 or a negative error; OR-ing the results
	 * yields non-zero if any step failed, which is all we report.
	 */
	err = fdt_create(root, PAGE_SIZE);
	err |= fdt_finish_reservemap(root);
	err |= fdt_begin_node(root, "");
	err |= fdt_property_string(root, "compatible", KHO_FDT_COMPATIBLE);

	/* The next kernel finds the preserved memory via this address. */
	preserved_mem_tree_pa = virt_to_phys(tree->root);

	err |= fdt_property(root, KHO_FDT_MEMORY_MAP_PROP_NAME,
			    &preserved_mem_tree_pa,
			    sizeof(preserved_mem_tree_pa));

	err |= fdt_end_node(root);
	err |= fdt_finish(root);

	return err;
}

/* Import the kexec-metadata blob exported by the previous kernel, if any. */
static void __init kho_in_kexec_metadata(void)
{
	struct kho_kexec_metadata *metadata;
	phys_addr_t metadata_phys;
	size_t blob_size;
	int err;

	err = kho_retrieve_subtree(KHO_METADATA_NODE_NAME, &metadata_phys,
				   &blob_size);
	if (err)
		/* This is fine, previous kernel didn't export metadata */
		return;

	/* Check that, at least, "version" is present */
	if (blob_size < sizeof(u32)) {
		pr_warn("kexec-metadata blob too small (%zu bytes)\n",
			blob_size);
		return;
	}

	metadata = phys_to_virt(metadata_phys);

	if (metadata->version != KHO_KEXEC_METADATA_VERSION) {
		pr_warn("kexec-metadata version %u not supported (expected %u)\n",
			metadata->version, KHO_KEXEC_METADATA_VERSION);
		return;
	}

	/* Version matches, so the full structure must fit in the blob. */
	if (blob_size < sizeof(*metadata)) {
		pr_warn("kexec-metadata blob too small for v%u (%zu < %zu)\n",
			metadata->version, blob_size, sizeof(*metadata));
		return;
	}

	/*
	 * Copy data to the kernel structure that will persist during
	 * kernel lifetime.
	 */
	kho_in.kexec_count = metadata->kexec_count;
	strscpy(kho_in.previous_release, metadata->previous_release,
		sizeof(kho_in.previous_release));

	/* NOTE(review): message reads "exec from" — possibly meant "kexec from"; confirm. */
	pr_info("exec from: %s (count %u)\n",
		kho_in.previous_release, kho_in.kexec_count);
}

/*
 * Create kexec metadata to pass kernel version and boot count to the
 * next kernel. This keeps the core KHO ABI minimal and allows the
 * metadata format to evolve independently.
 */
static __init int kho_out_kexec_metadata(void)
{
	struct kho_kexec_metadata *metadata;
	int err;

	/* Allocated preserved, so the blob itself survives the kexec. */
	metadata = kho_alloc_preserve(sizeof(*metadata));
	if (IS_ERR(metadata))
		return PTR_ERR(metadata);

	metadata->version = KHO_KEXEC_METADATA_VERSION;
	strscpy(metadata->previous_release, init_uts_ns.name.release,
		sizeof(metadata->previous_release));
	/* kho_in.kexec_count is set to 0 on cold boot */
	metadata->kexec_count = kho_in.kexec_count + 1;

	err = kho_add_subtree(KHO_METADATA_NODE_NAME, metadata,
			      sizeof(*metadata));
	if (err)
		kho_unpreserve_free(metadata);

	return err;
}

/* Ingest metadata from the previous kernel (if present) and export ours. */
static int __init kho_kexec_metadata_init(const void *fdt)
{
	int err;

	if (fdt)
		kho_in_kexec_metadata();

	/* Populate kexec metadata for the possible next kexec */
	err = kho_out_kexec_metadata();
	if (err)
		pr_warn("failed to initialize kexec-metadata subtree: %d\n",
			err);

	return err;
}

/* Late (fs_initcall) KHO setup: radix tree, outgoing FDT, debugfs, scratch. */
static __init int kho_init(void)
{
	struct kho_radix_tree *tree = &kho_out.radix_tree;
	const void *fdt = kho_get_fdt();
	int err = 0;

	if (!kho_enable)
		return 0;

	tree->root = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!tree->root) {
		err =
-ENOMEM;
		goto err_free_scratch;
	}

	/* The outgoing FDT page is itself preserved for the next kernel. */
	kho_out.fdt = kho_alloc_preserve(PAGE_SIZE);
	if (IS_ERR(kho_out.fdt)) {
		err = PTR_ERR(kho_out.fdt);
		goto err_free_kho_radix_tree_root;
	}

	err = kho_debugfs_init();
	if (err)
		goto err_free_fdt;

	err = kho_out_debugfs_init(&kho_out.dbg);
	if (err)
		goto err_free_fdt;

	err = kho_out_fdt_setup();
	if (err)
		goto err_free_fdt;

	err = kho_kexec_metadata_init(fdt);
	if (err)
		goto err_free_fdt;

	/*
	 * KHO boot: the scratch areas came from the previous kernel and were
	 * already handled in kho_memory_init(); only expose debugfs here.
	 */
	if (fdt) {
		kho_in_debugfs_init(&kho_in.dbg, fdt);
		return 0;
	}

	/* Cold boot: hand the freshly reserved scratch blocks over to CMA. */
	for (int i = 0; i < kho_scratch_cnt; i++) {
		unsigned long base_pfn = PHYS_PFN(kho_scratch[i].addr);
		unsigned long count = kho_scratch[i].size >> PAGE_SHIFT;
		unsigned long pfn;

		/*
		 * When debug_pagealloc is enabled, __free_pages() clears the
		 * corresponding PRESENT bit in the kernel page table.
		 * Subsequent kmemleak scans of these pages cause the
		 * non-PRESENT page faults.
		 * Mark scratch areas with kmemleak_ignore_phys() to exclude
		 * them from kmemleak scanning.
		 */
		kmemleak_ignore_phys(kho_scratch[i].addr);
		for (pfn = base_pfn; pfn < base_pfn + count;
		     pfn += pageblock_nr_pages)
			init_cma_reserved_pageblock(pfn_to_page(pfn));
	}

	WARN_ON_ONCE(kho_debugfs_blob_add(&kho_out.dbg, "fdt",
					  kho_out.fdt,
					  fdt_totalsize(kho_out.fdt), true));

	return 0;

	/* Error labels fall through: each stage undoes its own setup. */
err_free_fdt:
	kho_unpreserve_free(kho_out.fdt);
err_free_kho_radix_tree_root:
	kfree(tree->root);
	tree->root = NULL;
err_free_scratch:
	/*
	 * NOTE(review): clearing kho_out.fdt under err_free_scratch also runs
	 * when the FDT was never allocated — harmless (it is NULL then), but
	 * the placement is surprising; confirm it was not meant to sit under
	 * err_free_fdt.
	 */
	kho_out.fdt = NULL;
	for (int i = 0; i < kho_scratch_cnt; i++) {
		void *start = __va(kho_scratch[i].addr);
		void *end = start + kho_scratch[i].size;

		free_reserved_area(start, end, -1, "");
	}
	/* Disable KHO entirely; later kexec loads will skip handover data. */
	kho_enable = false;
	return err;
}
fs_initcall(kho_init);

/* Make the scratch memory received from the previous kernel usable again. */
static void __init kho_release_scratch(void)
{
	phys_addr_t start, end;
	u64 i;

	memmap_init_kho_scratch_pages();

	/*
	 * Mark scratch mem as CMA before we return it. That way we
	 * ensure that no kernel allocations happen on it. That means
	 * we can reuse it as scratch memory again later.
	 */
	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
			     MEMBLOCK_KHO_SCRATCH, &start, &end, NULL) {
		ulong start_pfn = pageblock_start_pfn(PFN_DOWN(start));
		ulong end_pfn = pageblock_align(PFN_UP(end));
		ulong pfn;

		/* Tag whole pageblocks as CMA so nothing unmovable lands here. */
		for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages)
			init_pageblock_migratetype(pfn_to_page(pfn),
						   MIGRATE_CMA, false);
	}
}

/* Early-boot entry: adopt handed-over scratch or reserve fresh scratch. */
void __init kho_memory_init(void)
{
	if (kho_in.scratch_phys) {
		kho_scratch = phys_to_virt(kho_in.scratch_phys);
		kho_release_scratch();

		/* A broken memory tree disables revival of preserved data. */
		if (kho_mem_retrieve(kho_get_fdt()))
			kho_in.fdt_phys = 0;
	} else {
		kho_reserve_scratch();
	}
}

/*
 * Validate and adopt the KHO handover data (FDT plus scratch array) passed
 * in by the previous kernel.
 */
void __init kho_populate(phys_addr_t fdt_phys, u64 fdt_len,
			 phys_addr_t scratch_phys, u64 scratch_len)
{
	unsigned int scratch_cnt = scratch_len / sizeof(*kho_scratch);
	struct kho_scratch *scratch = NULL;
	phys_addr_t mem_map_phys;
	void *fdt = NULL;
	bool populated = false;
	int err;

	/* Validate the input FDT */
	fdt = early_memremap(fdt_phys, fdt_len);
	if (!fdt) {
		pr_warn("setup: failed to memremap FDT (0x%llx)\n", fdt_phys);
		goto report;
	}
	err = fdt_check_header(fdt);
	if (err) {
		pr_warn("setup: handover FDT (0x%llx) is invalid: %d\n",
			fdt_phys, err);
		goto unmap_fdt;
	}
	err = fdt_node_check_compatible(fdt, 0, KHO_FDT_COMPATIBLE);
	if (err) {
		pr_warn("setup: handover FDT (0x%llx) is incompatible with '%s': %d\n",
			fdt_phys, KHO_FDT_COMPATIBLE, err);
		goto unmap_fdt;
	}

	/* Only validated here; the value is not used further in this path. */
	mem_map_phys = kho_get_mem_map_phys(fdt);
	if (!mem_map_phys)
		goto unmap_fdt;

	scratch = early_memremap(scratch_phys, scratch_len);
	if (!scratch) {
		pr_warn("setup: failed to memremap scratch (phys=0x%llx, len=%lld)\n",
			scratch_phys, scratch_len);
		goto unmap_fdt;
	}

	/*
	 * We pass a safe contiguous blocks of
memory to use for early boot 1653 * purporses from the previous kernel so that we can resize the 1654 * memblock array as needed. 1655 */ 1656 for (int i = 0; i < scratch_cnt; i++) { 1657 struct kho_scratch *area = &scratch[i]; 1658 u64 size = area->size; 1659 1660 memblock_add(area->addr, size); 1661 err = memblock_mark_kho_scratch(area->addr, size); 1662 if (err) { 1663 pr_warn("failed to mark the scratch region 0x%pa+0x%pa: %pe", 1664 &area->addr, &size, ERR_PTR(err)); 1665 goto unmap_scratch; 1666 } 1667 pr_debug("Marked 0x%pa+0x%pa as scratch", &area->addr, &size); 1668 } 1669 1670 memblock_reserve(scratch_phys, scratch_len); 1671 1672 /* 1673 * Now that we have a viable region of scratch memory, let's tell 1674 * the memblocks allocator to only use that for any allocations. 1675 * That way we ensure that nothing scribbles over in use data while 1676 * we initialize the page tables which we will need to ingest all 1677 * memory reservations from the previous kernel. 1678 */ 1679 memblock_set_kho_scratch_only(); 1680 1681 kho_in.fdt_phys = fdt_phys; 1682 kho_in.scratch_phys = scratch_phys; 1683 kho_scratch_cnt = scratch_cnt; 1684 1685 populated = true; 1686 pr_info("found kexec handover data.\n"); 1687 1688 unmap_scratch: 1689 early_memunmap(scratch, scratch_len); 1690 unmap_fdt: 1691 early_memunmap(fdt, fdt_len); 1692 report: 1693 if (!populated) 1694 pr_warn("disabling KHO revival\n"); 1695 } 1696 1697 /* Helper functions for kexec_file_load */ 1698 1699 int kho_fill_kimage(struct kimage *image) 1700 { 1701 ssize_t scratch_size; 1702 int err = 0; 1703 struct kexec_buf scratch; 1704 1705 if (!kho_enable) 1706 return 0; 1707 1708 image->kho.fdt = virt_to_phys(kho_out.fdt); 1709 1710 scratch_size = sizeof(*kho_scratch) * kho_scratch_cnt; 1711 scratch = (struct kexec_buf){ 1712 .image = image, 1713 .buffer = kho_scratch, 1714 .bufsz = scratch_size, 1715 .mem = KEXEC_BUF_MEM_UNKNOWN, 1716 .memsz = scratch_size, 1717 .buf_align = SZ_64K, /* Makes it easier to map 
*/ 1718 .buf_max = ULONG_MAX, 1719 .top_down = true, 1720 }; 1721 err = kexec_add_buffer(&scratch); 1722 if (err) 1723 return err; 1724 image->kho.scratch = &image->segment[image->nr_segments - 1]; 1725 1726 return 0; 1727 } 1728 1729 static int kho_walk_scratch(struct kexec_buf *kbuf, 1730 int (*func)(struct resource *, void *)) 1731 { 1732 int ret = 0; 1733 int i; 1734 1735 for (i = 0; i < kho_scratch_cnt; i++) { 1736 struct resource res = { 1737 .start = kho_scratch[i].addr, 1738 .end = kho_scratch[i].addr + kho_scratch[i].size - 1, 1739 }; 1740 1741 /* Try to fit the kimage into our KHO scratch region */ 1742 ret = func(&res, kbuf); 1743 if (ret) 1744 break; 1745 } 1746 1747 return ret; 1748 } 1749 1750 int kho_locate_mem_hole(struct kexec_buf *kbuf, 1751 int (*func)(struct resource *, void *)) 1752 { 1753 int ret; 1754 1755 if (!kho_enable || kbuf->image->type == KEXEC_TYPE_CRASH) 1756 return 1; 1757 1758 ret = kho_walk_scratch(kbuf, func); 1759 1760 return ret == 1 ? 0 : -EADDRNOTAVAIL; 1761 } 1762