Lines Matching +full:reserve +full:- +full:mem +full:- +full:v1
1 // SPDX-License-Identifier: GPL-2.0-only
3 * kexec_handover.c - kexec handover metadata processing
20 #include <linux/page-isolation.h>
32 #define KHO_FDT_COMPATIBLE "kho-v1"
33 #define PROP_PRESERVED_MEMORY_MAP "preserved-memory-map"
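For orientation, these two strings name the root KHO FDT's compatible and the single property the preserved-memory map travels in. A hedged sketch of that tree, where the layout and the example "memblock" entry are assumptions rather than quotes from the source:

/*
 * / {
 *         compatible = "kho-v1";
 *         preserved-memory-map = <phys of the first bitmap chunk>;
 *         memblock = <phys of a producer's sub-FDT>;    (hypothetical entry)
 * };
 */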
39 * KHO uses page->private, which is an unsigned long, to store page metadata.
50 static_assert(sizeof(union kho_page_info) == sizeof(((struct page *)0)->private));
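The static_assert above only pins the union's size to that of page->private; a plausible layout, consistent with the magic and order checks in kho_restore_page() further down (field names assumed, and two 32-bit fields filling the unsigned long assumes a 64-bit build):

union kho_page_info {
	unsigned long page_private;	/* aliases page->private */
	struct {
		unsigned int order;	/* order of the preserved page */
		unsigned int magic;	/* set only on preserved pages */
	};
};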
69 * The serializing side uses two levels of xarrays to manage chunks of per-order
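A minimal sketch of that two-level tracking structure; the field names (orders, phys_bits, preserve) are visible in the fragments below, only the struct type names are assumed:

struct kho_mem_phys_bits {
	DECLARE_BITMAP(preserve, PRESERVE_BITS);
};

struct kho_mem_phys {
	/* inner level: pfn_high / PRESERVE_BITS -> kho_mem_phys_bits */
	struct xarray phys_bits;
};

struct kho_mem_track {
	/* outer level: page order -> kho_mem_phys */
	struct xarray orders;
};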
144 return ERR_PTR(-ENOMEM); in xa_load_or_alloc()
166 min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn)); in __kho_unpreserve()
169 physxa = xa_load(&track->orders, order); in __kho_unpreserve()
173 bits = xa_load(&physxa->phys_bits, pfn_high / PRESERVE_BITS); in __kho_unpreserve()
177 clear_bit(pfn_high % PRESERVE_BITS, bits->preserve); in __kho_unpreserve()
193 return -EBUSY; in __kho_preserve_order()
195 physxa = xa_load(&track->orders, order); in __kho_preserve_order()
201 return -ENOMEM; in __kho_preserve_order()
203 xa_init(&new_physxa->phys_bits); in __kho_preserve_order()
204 physxa = xa_cmpxchg(&track->orders, order, NULL, new_physxa, in __kho_preserve_order()
209 xa_destroy(&new_physxa->phys_bits); in __kho_preserve_order()
219 bits = xa_load_or_alloc(&physxa->phys_bits, pfn_high / PRESERVE_BITS, in __kho_preserve_order()
224 set_bit(pfn_high % PRESERVE_BITS, bits->preserve); in __kho_preserve_order()
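A worked example of that indexing, using hypothetical numbers and taking PRESERVE_BITS as 512 purely for brevity (pfn_high is pfn >> order, as the surrounding code implies):

/*
 * Preserving one order-9 (2 MiB) page at phys 0x40200000:
 *   pfn      = 0x40200000 >> PAGE_SHIFT  = 0x40200
 *   pfn_high = pfn >> 9                  = 0x201 (= 513)
 *   bits     = xa_load(&physxa->phys_bits, 513 / 512)  -> chunk 1
 *   set_bit(513 % 512, bits->preserve)                 -> bit 1
 */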
238 info.page_private = page->private; in kho_restore_page()
241 * check also implicitly makes sure phys is order-aligned since for in kho_restore_page()
242 * non-order-aligned phys addresses, magic will never be set. in kho_restore_page()
249 page->private = 0; in kho_restore_page()
265 * kho_restore_folio - recreates the folio from the preserved memory.
279 * kho_restore_pages - restore a list of contiguous order-0 pages.
296 min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn)); in kho_restore_pages()
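A hypothetical consumer on the restore side, assuming kho_restore_folio() takes a physical address and kho_restore_pages() takes a physical address plus a page count:

/* phys values were handed over from the previous kernel */
struct folio *folio = kho_restore_folio(folio_phys);
struct page *first = kho_restore_pages(buf_phys, nr_pages);

if (!folio || !first)
	return -ENOENT;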
335 ((PAGE_SIZE - sizeof(struct khoser_mem_chunk_hdr)) / \
353 chunk->hdr.order = order; in new_chunk()
355 KHOSER_STORE_PTR(cur_chunk->hdr.next, chunk); in new_chunk()
366 chunk = KHOSER_LOAD_PTR(chunk->hdr.next); in kho_mem_ser_free()
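From the page-size arithmetic at line 335 and the fields used in kho_mem_serialize() below, the serialized chunk is plausibly a page-sized record along these lines. DECLARE_KHOSER_PTR is assumed to pair with the KHOSER_STORE_PTR/KHOSER_LOAD_PTR helpers, and the array-bound macro name is invented:

struct khoser_mem_bitmap_ptr {
	phys_addr_t phys_start;
	DECLARE_KHOSER_PTR(bitmap, struct kho_mem_phys_bits *);
};

struct khoser_mem_chunk {
	struct khoser_mem_chunk_hdr hdr;	/* next, order, num_elms */
	struct khoser_mem_bitmap_ptr bitmaps[KHOSER_BITMAP_CHUNK_SIZE];
};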
378 xa_for_each(&ser->track.orders, order, physxa) { in kho_mem_serialize()
389 xa_for_each(&physxa->phys_bits, phys, bits) { in kho_mem_serialize()
392 if (chunk->hdr.num_elms == ARRAY_SIZE(chunk->bitmaps)) { in kho_mem_serialize()
398 elm = &chunk->bitmaps[chunk->hdr.num_elms]; in kho_mem_serialize()
399 chunk->hdr.num_elms++; in kho_mem_serialize()
400 elm->phys_start = (phys * PRESERVE_BITS) in kho_mem_serialize()
402 KHOSER_STORE_PTR(elm->bitmap, bits); in kho_mem_serialize()
406 ser->preserved_mem_map = first_chunk; in kho_mem_serialize()
412 return -ENOMEM; in kho_mem_serialize()
418 struct kho_mem_phys_bits *bitmap = KHOSER_LOAD_PTR(elm->bitmap); in deserialize_bitmap()
421 for_each_set_bit(bit, bitmap->preserve, PRESERVE_BITS) { in deserialize_bitmap()
424 elm->phys_start + (bit << (order + PAGE_SHIFT)); in deserialize_bitmap()
432 page->private = info.page_private; in deserialize_bitmap()
439 const phys_addr_t *mem; in kho_mem_deserialize() local
442 mem = fdt_getprop(fdt, 0, PROP_PRESERVED_MEMORY_MAP, &len); in kho_mem_deserialize()
444 if (!mem || len != sizeof(*mem)) { in kho_mem_deserialize()
449 chunk = *mem ? phys_to_virt(*mem) : NULL; in kho_mem_deserialize()
453 for (i = 0; i != chunk->hdr.num_elms; i++) in kho_mem_deserialize()
454 deserialize_bitmap(chunk->hdr.order, in kho_mem_deserialize()
455 &chunk->bitmaps[i]); in kho_mem_deserialize()
456 chunk = KHOSER_LOAD_PTR(chunk->hdr.next); in kho_mem_deserialize()
478 * per-node scratch areas:
497 return -EINVAL; in kho_parse_scratch_size()
501 return -EINVAL; in kho_parse_scratch_size()
504 if (p[len - 1] == '%') { in kho_parse_scratch_size()
510 return -EINVAL; in kho_parse_scratch_size()
512 memcpy(s_scale, p, len - 1); in kho_parse_scratch_size()
525 return -EINVAL; in kho_parse_scratch_size()
531 return -EINVAL; in kho_parse_scratch_size()
537 return -EINVAL; in kho_parse_scratch_size()
541 return -EINVAL; in kho_parse_scratch_size()
571 size = size * scratch_scale / 100 - scratch_size_lowmem; in scratch_size_update()
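Putting the parser and the scaling together, with formats and numbers that are illustrative rather than verified:

/*
 * Boot-line forms accepted above (size order assumed to be
 * lowmem, global, per-node):
 *   kho_scratch=512M,1G,256M
 *   kho_scratch=2%
 *
 * For the percentage form, with hypothetical numbers: 100 GiB of
 * RAM, scratch_scale == 2, and 256 MiB already reserved in lowmem:
 *   102400 MiB * 2 / 100 - 256 MiB = 1792 MiB of global scratch
 */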
591 * kho_reserve_scratch - Reserve a contiguous chunk of memory for kexec
609 /* FIXME: deal with node hot-plug/remove */ in kho_reserve_scratch()
617 * reserve scratch area in low memory for lowmem allocations in the in kho_reserve_scratch()
630 /* reserve large contiguous area for allocations without nid */ in kho_reserve_scratch()
656 for (i--; i >= 0; i--) in kho_reserve_scratch()
661 pr_warn("Failed to reserve scratch area, disabling kexec handover\n"); in kho_reserve_scratch()
679 return -ENOMEM; in kho_debugfs_fdt_add()
681 f->wrapper.data = (void *)fdt; in kho_debugfs_fdt_add()
682 f->wrapper.size = fdt_totalsize(fdt); in kho_debugfs_fdt_add()
684 file = debugfs_create_blob(name, 0400, dir, &f->wrapper); in kho_debugfs_fdt_add()
690 f->file = file; in kho_debugfs_fdt_add()
691 list_add(&f->list, list); in kho_debugfs_fdt_add()
697 * kho_add_subtree - record the physical address of a sub FDT in the KHO root tree.
715 void *root = page_to_virt(ser->fdt); in kho_add_subtree()
724 return kho_debugfs_fdt_add(&ser->fdt_list, ser->sub_fdt_dir, name, fdt); in kho_add_subtree()
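A hypothetical producer, with the signature inferred from the fragments above (serialization handle, subtree name, sub-FDT virtual address):

static int my_driver_serialize(struct kho_serialization *ser, void *fdt)
{
	/* "my-driver" is an invented subtree name */
	return kho_add_subtree(ser, "my-driver", fdt);
}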
741 * kho_preserve_folio - preserve a folio across kexec.
760 * kho_preserve_pages - preserve contiguous pages across kexec
780 min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn)); in kho_preserve_pages()
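Hypothetical preservation calls, assuming kho_preserve_folio(struct folio *) and kho_preserve_pages(struct page *, unsigned int):

static int my_driver_preserve(struct folio *folio, struct page *page,
			      unsigned int nr_pages)
{
	int err = kho_preserve_folio(folio);

	if (err)
		return err;

	/* the nr_pages run starting at page must be physically contiguous */
	return kho_preserve_pages(page, nr_pages);
}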
803 ((PAGE_SIZE - sizeof(struct kho_vmalloc_hdr)) / \
857 KHOSER_STORE_PTR(cur->hdr.next, chunk); in new_vmalloc_chunk()
872 for (int i = 0; chunk->phys[i]; i++) { in kho_vmalloc_unpreserve_chunk()
873 pfn = PHYS_PFN(chunk->phys[i]); in kho_vmalloc_unpreserve_chunk()
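Combining the page-size arithmetic at line 803 with the zero-terminated phys[] walk above, the per-chunk record is plausibly shaped like this (the entry-count macro name is invented):

struct kho_vmalloc_hdr {
	DECLARE_KHOSER_PTR(next, struct kho_vmalloc_chunk *);
};

#define KHO_VMALLOC_CHUNK_ENTRIES \
	((PAGE_SIZE - sizeof(struct kho_vmalloc_hdr)) / sizeof(phys_addr_t))

struct kho_vmalloc_chunk {
	struct kho_vmalloc_hdr hdr;
	/* zero-terminated physical addresses of the area's page groups */
	phys_addr_t phys[KHO_VMALLOC_CHUNK_ENTRIES];
};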
880 struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(kho_vmalloc->first); in kho_vmalloc_free_chunks()
887 chunk = KHOSER_LOAD_PTR(chunk->hdr.next); in kho_vmalloc_free_chunks()
893 * kho_preserve_vmalloc - preserve memory allocated with vmalloc() across kexec
916 return -EINVAL; in kho_preserve_vmalloc()
918 if (vm->flags & ~KHO_VMALLOC_SUPPORTED_FLAGS) in kho_preserve_vmalloc()
919 return -EOPNOTSUPP; in kho_preserve_vmalloc()
921 flags = vmalloc_flags_to_kho(vm->flags); in kho_preserve_vmalloc()
926 return -ENOMEM; in kho_preserve_vmalloc()
927 KHOSER_STORE_PTR(preservation->first, chunk); in kho_preserve_vmalloc()
930 for (int i = 0; i < vm->nr_pages; i += nr_contig_pages) { in kho_preserve_vmalloc()
931 phys_addr_t phys = page_to_phys(vm->pages[i]); in kho_preserve_vmalloc()
933 err = kho_preserve_pages(vm->pages[i], nr_contig_pages); in kho_preserve_vmalloc()
937 chunk->phys[idx++] = phys; in kho_preserve_vmalloc()
938 if (idx == ARRAY_SIZE(chunk->phys)) { in kho_preserve_vmalloc()
946 preservation->total_pages = vm->nr_pages; in kho_preserve_vmalloc()
947 preservation->flags = flags; in kho_preserve_vmalloc()
948 preservation->order = order; in kho_preserve_vmalloc()
959 * kho_restore_vmalloc - recreates and populates an area in vmalloc address
970 struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first); in kho_restore_vmalloc()
979 vm_flags = kho_flags_to_vmalloc(preservation->flags); in kho_restore_vmalloc()
983 total_pages = preservation->total_pages; in kho_restore_vmalloc()
987 order = preservation->order; in kho_restore_vmalloc()
995 for (int i = 0; chunk->phys[i]; i++) { in kho_restore_vmalloc()
996 phys_addr_t phys = chunk->phys[i]; in kho_restore_vmalloc()
1014 chunk = KHOSER_LOAD_PTR(chunk->hdr.next); in kho_restore_vmalloc()
1028 addr = (unsigned long)area->addr; in kho_restore_vmalloc()
1034 area->nr_pages = total_pages; in kho_restore_vmalloc()
1035 area->pages = pages; in kho_restore_vmalloc()
1037 return area->addr; in kho_restore_vmalloc()
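A hypothetical round trip, assuming kho_preserve_vmalloc() fills a caller-provided struct kho_vmalloc (first, total_pages, flags, order, per the assignments above) that the caller then hands over, e.g. inside a sub-FDT:

/* old kernel */
struct kho_vmalloc kv;
int err = kho_preserve_vmalloc(buf, &kv);

/* new kernel, after reading kv back out of the handover data */
void *restored = kho_restore_vmalloc(&kv);
if (!restored)
	return -ENOMEM;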
1061 debugfs_remove(ff->file); in kho_out_update_debugfs_fdt()
1062 list_del(&ff->list); in kho_out_update_debugfs_fdt()
1080 xa_for_each(&physxa->phys_bits, phys, bits) in kho_abort()
1083 xa_destroy(&physxa->phys_bits); in kho_abort()
1114 * Reserve the preserved-memory-map property in the root FDT, so in kho_finalize()
1170 ret = -EEXIST; in kho_out_finalize_set()
1172 ret = -ENOENT; in kho_out_finalize_set()
1219 return -ENOMEM; in kho_out_debugfs_init()
1246 return -ENOENT; in kho_out_debugfs_init()
1266 * is_kho_boot - check if the current kernel was booted via KHO-enabled
1277 * Return: true if booted via KHO-enabled kexec, false otherwise
1286 * kho_retrieve_subtree - retrieve a preserved sub FDT by its name.
1302 return -ENOENT; in kho_retrieve_subtree()
1305 return -EINVAL; in kho_retrieve_subtree()
1309 return -ENOENT; in kho_retrieve_subtree()
1313 return -EINVAL; in kho_retrieve_subtree()
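A hypothetical consumer on the new kernel's boot path; the subtree name and my_driver_restore() are invented for illustration:

static int __init my_driver_recover(void)
{
	phys_addr_t fdt_phys;
	int err;

	if (!is_kho_boot())
		return 0;	/* cold boot, nothing to recover */

	err = kho_retrieve_subtree("my-driver", &fdt_phys);
	if (err)
		return err;

	return my_driver_restore(phys_to_virt(fdt_phys));
}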
1381 err = -ENOMEM; in kho_init()
1387 err = -ENOENT; in kho_init()
1429 free_reserved_area(start, end, -1, ""); in kho_init()
1444 * Mark scratch mem as CMA before we return it. That way we in kho_release_scratch()
1489 err = -EFAULT; in kho_populate()
1496 err = -EINVAL; in kho_populate()
1503 err = -EINVAL; in kho_populate()
1511 err = -EFAULT; in kho_populate()
1522 u64 size = area->size; in kho_populate()
1524 memblock_add(area->addr, size); in kho_populate()
1525 err = memblock_mark_kho_scratch(area->addr, size); in kho_populate()
1528 &area->addr, &size, err); in kho_populate()
1531 pr_debug("Marked 0x%pa+0x%pa as scratch", &area->addr, &size); in kho_populate()
1570 image->kho.fdt = page_to_phys(kho_out.ser.fdt); in kho_fill_kimage()
1577 .mem = KEXEC_BUF_MEM_UNKNOWN, in kho_fill_kimage()
1586 image->kho.scratch = &image->segment[image->nr_segments - 1]; in kho_fill_kimage()
1600 .end = kho_scratch[i].addr + kho_scratch[i].size - 1, in kho_walk_scratch()
1617 if (!kho_enable || kbuf->image->type == KEXEC_TYPE_CRASH) in kho_locate_mem_hole()
1622 return ret == 1 ? 0 : -EADDRNOTAVAIL; in kho_locate_mem_hole()
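Reading the two fragments above together: kho_walk_scratch() presumably presents each scratch area as a struct resource to the kexec locate-hole callback, which returns 1 once a fitting hole is found. A sketch under those assumptions (kho_scratch_cnt is an assumed counter):

static int kho_walk_scratch(struct kexec_buf *kbuf,
			    int (*func)(struct resource *, void *))
{
	int ret = 0;
	int i;

	for (i = 0; i < kho_scratch_cnt; i++) {
		struct resource res = {
			.start = kho_scratch[i].addr,
			.end = kho_scratch[i].addr + kho_scratch[i].size - 1,
		};

		/* stop as soon as the callback reports a fit (ret == 1) */
		ret = func(&res, kbuf);
		if (ret)
			break;
	}

	return ret;
}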