Lines matching "preserved-memory-map" in kexec_handover.c

1 // SPDX-License-Identifier: GPL-2.0-only
3 * kexec_handover.c - kexec handover metadata processing
20 #include <linux/page-isolation.h>
31 #define KHO_FDT_COMPATIBLE "kho-v1"
32 #define PROP_PRESERVED_MEMORY_MAP "preserved-memory-map"
50 * Keep track of memory that is to be preserved across KHO.
52 * The serializing side uses two levels of xarrays to manage chunks of per-order
55 * each bitmap will cover 16M of address space. Thus, for 16G of memory at most
56 * 512K of bitmap memory will be needed for order 0.
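
Only the matching lines are shown, so the tracking structures themselves never appear in this listing. A minimal sketch consistent with the fields referenced further down (track->orders, physxa->phys_bits, bits->preserve) and with the sizing stated in the comment above (a 512-byte bitmap covers 16M of address space at order 0) might look as follows; the exact definitions are an assumption, not a copy of the source:

/*
 * Sketch of the in-memory tracking structures implied by the fragments in
 * this listing. Field names come from the listing; PRESERVE_BITS follows
 * the 16M-per-512-byte-bitmap arithmetic in the comment above.
 */
#include <linux/bitmap.h>
#include <linux/xarray.h>

#define PRESERVE_BITS (512 * 8)		/* 4096 bits: 16M at order 0 */

struct kho_mem_phys_bits {
	DECLARE_BITMAP(preserve, PRESERVE_BITS);
};

struct kho_mem_phys {
	/* sparse array of kho_mem_phys_bits, indexed by pfn_high / PRESERVE_BITS */
	struct xarray phys_bits;
};

struct kho_mem_track {
	/* one struct kho_mem_phys per preserved order */
	struct xarray orders;
};
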
90 /* First chunk of serialized preserved memory map */
104 return ERR_PTR(-ENOMEM); in xa_load_or_alloc()
126 min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn)); in __kho_unpreserve()
129 physxa = xa_load(&track->orders, order); in __kho_unpreserve()
133 bits = xa_load(&physxa->phys_bits, pfn_high / PRESERVE_BITS); in __kho_unpreserve()
137 clear_bit(pfn_high % PRESERVE_BITS, bits->preserve); in __kho_unpreserve()
152 physxa = xa_load(&track->orders, order); in __kho_preserve_order()
158 return -ENOMEM; in __kho_preserve_order()
160 xa_init(&new_physxa->phys_bits); in __kho_preserve_order()
161 physxa = xa_cmpxchg(&track->orders, order, NULL, new_physxa, in __kho_preserve_order()
166 xa_destroy(&new_physxa->phys_bits); in __kho_preserve_order()
176 bits = xa_load_or_alloc(&physxa->phys_bits, pfn_high / PRESERVE_BITS, in __kho_preserve_order()
181 set_bit(pfn_high % PRESERVE_BITS, bits->preserve); in __kho_preserve_order()
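
The preserve and unpreserve helpers above share one indexing scheme: the pfn is first shifted right by the order (one bit tracks one 2^order-page block), and the result is split into an xarray index and a bit offset. A small illustration of that arithmetic, with a hypothetical helper name:

/*
 * Illustration of the pfn_high / PRESERVE_BITS and pfn_high % PRESERVE_BITS
 * indexing visible above. The helper itself is made up for this sketch.
 */
static void kho_example_index(unsigned long pfn, unsigned int order,
			      unsigned long *bitmap_idx, unsigned int *bit)
{
	unsigned long pfn_high = pfn >> order;	/* one bit per 2^order pages */

	*bitmap_idx = pfn_high / PRESERVE_BITS;	/* which kho_mem_phys_bits */
	*bit = pfn_high % PRESERVE_BITS;	/* which bit inside it */
}
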
205 * kho_restore_folio - recreates the folio from the preserved memory.
218 order = page->private; in kho_restore_folio()
235 * All of this memory is normal kmalloc() memory and is not marked for
237 * until it completes processing this list. Once processed all the memory
253 ((PAGE_SIZE - sizeof(struct khoser_mem_chunk_hdr)) / \
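
The serialized map is a linked list of page-sized chunks, and line 253 above sizes the per-chunk element array as one page minus the header. A sketch of the layout implied by the fields used below (hdr.next, hdr.order, hdr.num_elms, bitmaps[], phys_start, bitmap) follows; the KHOSER_STORE_PTR()/KHOSER_LOAD_PTR() helpers presumably translate between virtual pointers and physical addresses so the successor kernel can walk the list, and the names and types here are assumptions rather than copies of the source:

/*
 * Sketch of the serialized chunk layout. Plain physical addresses stand in
 * for whatever the DECLARE_KHOSER_PTR-style wrappers actually are.
 */
#include <linux/mm.h>
#include <linux/types.h>

struct khoser_mem_chunk_hdr {
	phys_addr_t next;		/* next chunk, 0 terminates the list */
	unsigned int order;		/* order covered by this chunk's bitmaps */
	unsigned int num_elms;		/* used entries in bitmaps[] */
};

struct khoser_mem_bitmap_ptr {
	phys_addr_t phys_start;		/* address covered by the first bit */
	phys_addr_t bitmap;		/* location of a struct kho_mem_phys_bits */
};

#define KHOSER_ELMS_PER_CHUNK \
	((PAGE_SIZE - sizeof(struct khoser_mem_chunk_hdr)) / \
	 sizeof(struct khoser_mem_bitmap_ptr))

struct khoser_mem_chunk {
	struct khoser_mem_chunk_hdr hdr;
	struct khoser_mem_bitmap_ptr bitmaps[KHOSER_ELMS_PER_CHUNK];
};
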
271 chunk->hdr.order = order; in new_chunk()
273 KHOSER_STORE_PTR(cur_chunk->hdr.next, chunk); in new_chunk()
284 chunk = KHOSER_LOAD_PTR(chunk->hdr.next); in kho_mem_ser_free()
296 xa_for_each(&ser->track.orders, order, physxa) { in kho_mem_serialize()
307 xa_for_each(&physxa->phys_bits, phys, bits) { in kho_mem_serialize()
310 if (chunk->hdr.num_elms == ARRAY_SIZE(chunk->bitmaps)) { in kho_mem_serialize()
316 elm = &chunk->bitmaps[chunk->hdr.num_elms]; in kho_mem_serialize()
317 chunk->hdr.num_elms++; in kho_mem_serialize()
318 elm->phys_start = (phys * PRESERVE_BITS) in kho_mem_serialize()
320 KHOSER_STORE_PTR(elm->bitmap, bits); in kho_mem_serialize()
324 ser->preserved_mem_map = first_chunk; in kho_mem_serialize()
330 return -ENOMEM; in kho_mem_serialize()
336 struct kho_mem_phys_bits *bitmap = KHOSER_LOAD_PTR(elm->bitmap); in deserialize_bitmap()
339 for_each_set_bit(bit, bitmap->preserve, PRESERVE_BITS) { in deserialize_bitmap()
342 elm->phys_start + (bit << (order + PAGE_SHIFT)); in deserialize_bitmap()
347 page->private = order; in deserialize_bitmap()
360 pr_err("failed to get preserved memory bitmaps\n"); in kho_mem_deserialize()
368 for (i = 0; i != chunk->hdr.num_elms; i++) in kho_mem_deserialize()
369 deserialize_bitmap(chunk->hdr.order, in kho_mem_deserialize()
370 &chunk->bitmaps[i]); in kho_mem_deserialize()
371 chunk = KHOSER_LOAD_PTR(chunk->hdr.next); in kho_mem_deserialize()
376 * With KHO enabled, memory can become fragmented because KHO regions may
387 * The scratch areas are scaled by default as percent of memory allocated from
393 * per-node scratch areas:
411 return -EINVAL; in kho_parse_scratch_size()
415 return -EINVAL; in kho_parse_scratch_size()
418 if (p[len - 1] == '%') { in kho_parse_scratch_size()
424 return -EINVAL; in kho_parse_scratch_size()
426 memcpy(s_scale, p, len - 1); in kho_parse_scratch_size()
439 return -EINVAL; in kho_parse_scratch_size()
445 return -EINVAL; in kho_parse_scratch_size()
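
The parser above accepts either a plain size or a value with a trailing '%' (line 418), and the surrounding fragments distinguish a low-memory area, a global area and per-node areas. As a hedged illustration only, the boot parameter presumably takes forms along these lines; the exact syntax is an assumption, not something this listing confirms:

/*
 * Illustrative (assumed) kho_scratch= forms:
 *
 *   kho_scratch=16%              scale scratch areas as a percentage of memory
 *   kho_scratch=512M,1G,256M     explicit lowmem, global and per-node sizes
 */
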
477 size = size * scratch_scale / 100 - scratch_size_lowmem; in scratch_size_update()
497 * kho_reserve_scratch - Reserve a contiguous chunk of memory for kexec
500 * have a large contiguous region of memory when we search the physical address
501 * space for target memory, let's make sure we always have a large CMA region
515 /* FIXME: deal with node hot-plug/remove */ in kho_reserve_scratch()
523 * reserve scratch area in low memory for lowmem allocations in the in kho_reserve_scratch()
562 for (i--; i >= 0; i--) in kho_reserve_scratch()
585 return -ENOMEM; in kho_debugfs_fdt_add()
587 f->wrapper.data = (void *)fdt; in kho_debugfs_fdt_add()
588 f->wrapper.size = fdt_totalsize(fdt); in kho_debugfs_fdt_add()
590 file = debugfs_create_blob(name, 0400, dir, &f->wrapper); in kho_debugfs_fdt_add()
596 f->file = file; in kho_debugfs_fdt_add()
597 list_add(&f->list, list); in kho_debugfs_fdt_add()
603 * kho_add_subtree - record the physical address of a sub FDT in KHO root tree.
609 * the physical address of @fdt. The pages of @fdt must also be preserved
621 void *root = page_to_virt(ser->fdt); in kho_add_subtree()
630 return kho_debugfs_fdt_add(&ser->fdt_list, ser->sub_fdt_dir, name, fdt); in kho_add_subtree()
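
Together with kho_preserve_folio() below, this suggests the producer-side flow: build a small FDT in memory that is itself preserved, then record it under a name in the KHO root tree. A hypothetical caller might look like the sketch below; the kho_* signatures, the header name and the node/property names are assumptions drawn from the fragments, not confirmed by this listing:

/*
 * Hypothetical producer: hand one page worth of FDT-encoded state to KHO.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/libfdt.h>
#include <linux/kexec_handover.h>	/* assumed header for the kho_* API */

static int example_serialize(struct kho_serialization *ser, u64 state)
{
	struct folio *folio = folio_alloc(GFP_KERNEL, 0);
	void *fdt;
	int err;

	if (!folio)
		return -ENOMEM;
	fdt = folio_address(folio);

	err = fdt_create_empty_tree(fdt, PAGE_SIZE);
	if (!err)
		err = fdt_setprop_u64(fdt, 0, "example-state", state);
	if (err) {
		folio_put(folio);
		return -EINVAL;
	}

	/* The FDT page itself must survive kexec (see the docstring above)... */
	err = kho_preserve_folio(folio);
	if (err) {
		folio_put(folio);
		return err;
	}

	/* ...and its physical address is recorded under "example" in the root tree. */
	return kho_add_subtree(ser, "example", fdt);
}
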
670 * kho_preserve_folio - preserve a folio across kexec.
674 * will be preserved as well.
685 return -EBUSY; in kho_preserve_folio()
692 * kho_preserve_phys - preserve a physically contiguous range across kexec.
696 * Instructs KHO to preserve the memory range from @phys to @phys + @size
711 return -EBUSY; in kho_preserve_phys()
714 return -EINVAL; in kho_preserve_phys()
718 min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn)); in kho_preserve_phys()
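
As on line 126 earlier, the physical range here is walked in the largest power-of-two blocks that both stay naturally aligned and fit in what remains. A standalone illustration of that decomposition (the helper and the printout are made up):

/*
 * Split [pfn, end_pfn) into maximal aligned power-of-two blocks, mirroring
 * the min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn)) expression above.
 */
#include <linux/count_zeros.h>
#include <linux/log2.h>
#include <linux/minmax.h>
#include <linux/printk.h>

static void example_split(unsigned long pfn, unsigned long end_pfn)
{
	while (pfn < end_pfn) {
		const int order = min(count_trailing_zeros(pfn),
				      ilog2(end_pfn - pfn));

		pr_info("block at pfn %#lx, order %d\n", pfn, order);
		pfn += 1UL << order;
	}
}
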
750 debugfs_remove(ff->file); in kho_out_update_debugfs_fdt()
751 list_del(&ff->list); in kho_out_update_debugfs_fdt()
769 xa_for_each(&physxa->phys_bits, phys, bits) in kho_abort()
772 xa_destroy(&physxa->phys_bits); in kho_abort()
803 * Reserve the preserved-memory-map property in the root FDT, so in kho_finalize()
859 ret = -EEXIST; in kho_out_finalize_set()
861 ret = -ENOENT; in kho_out_finalize_set()
908 return -ENOMEM; in kho_out_debugfs_init()
935 return -ENOENT; in kho_out_debugfs_init()
955 * kho_retrieve_subtree - retrieve a preserved sub FDT by its name.
959 * Retrieve a preserved sub FDT named @name and store its physical
971 return -ENOENT; in kho_retrieve_subtree()
974 return -EINVAL; in kho_retrieve_subtree()
978 return -ENOENT; in kho_retrieve_subtree()
982 return -EINVAL; in kho_retrieve_subtree()
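
On the restore side, the counterpart of the producer sketch above would look up the sub-FDT by name, map it, read its state back and reclaim the page through kho_restore_folio(). A hypothetical example, with the same caveat that the kho_* signatures and names are assumptions:

/*
 * Hypothetical consumer in the successor kernel for the "example" subtree.
 */
#include <linux/io.h>
#include <linux/libfdt.h>
#include <linux/mm.h>
#include <linux/kexec_handover.h>	/* assumed header for the kho_* API */

static int example_restore(u64 *state)
{
	phys_addr_t fdt_phys;
	struct folio *folio;
	const fdt64_t *prop;
	const void *fdt;
	int len, err;

	err = kho_retrieve_subtree("example", &fdt_phys);
	if (err)
		return err;

	fdt = phys_to_virt(fdt_phys);
	prop = fdt_getprop(fdt, 0, "example-state", &len);
	if (!prop || len != sizeof(*prop))
		return -EINVAL;
	*state = fdt64_to_cpu(*prop);

	/* Turn the preserved FDT page back into a folio this kernel owns. */
	folio = kho_restore_folio(fdt_phys);
	if (!folio)
		return -EFAULT;
	folio_put(folio);

	return 0;
}
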
1050 err = -ENOMEM; in kho_init()
1056 err = -ENOENT; in kho_init()
1098 free_reserved_area(start, end, -1, ""); in kho_init()
1115 * we can reuse it as scratch memory again later. in kho_release_scratch()
1117 __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, in kho_release_scratch()
1158 err = -EFAULT; in kho_populate()
1165 err = -EINVAL; in kho_populate()
1172 err = -EINVAL; in kho_populate()
1180 err = -EFAULT; in kho_populate()
1185 * We pass safe contiguous blocks of memory to use for early boot in kho_populate()
1191 u64 size = area->size; in kho_populate()
1193 memblock_add(area->addr, size); in kho_populate()
1194 err = memblock_mark_kho_scratch(area->addr, size); in kho_populate()
1197 &area->addr, &size, err); in kho_populate()
1200 pr_debug("Marked 0x%pa+0x%pa as scratch", &area->addr, &size); in kho_populate()
1206 * Now that we have a viable region of scratch memory, let's tell in kho_populate()
1210 * memory reservations from the previous kernel. in kho_populate()
1239 image->kho.fdt = page_to_phys(kho_out.ser.fdt); in kho_fill_kimage()
1248 .buf_align = SZ_64K, /* Makes it easier to map */ in kho_fill_kimage()
1255 image->kho.scratch = &image->segment[image->nr_segments - 1]; in kho_fill_kimage()
1269 .end = kho_scratch[i].addr + kho_scratch[i].size - 1, in kho_walk_scratch()
1286 if (!kho_enable || kbuf->image->type == KEXEC_TYPE_CRASH) in kho_locate_mem_hole()
1291 return ret == 1 ? 0 : -EADDRNOTAVAIL; in kho_locate_mem_hole()