Lines Matching full:scratch — search hits from the kexec handover (KHO) implementation; each hit shows its source line number, the matching line, and (where available) the enclosing function.
318 * preservation. The successor kernel will remain isolated to the scratch space
462 * be anywhere in physical address space. The scratch regions give us a
464 * can later safely load our new kexec images into and then use the scratch
472 * The scratch areas are scaled by default as percent of memory allocated from
478 * per-node scratch areas:
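
Taken together, the comment hits above describe the model: the successor kernel stays confined to scratch memory during early boot, so preserved pages cannot be clobbered before KHO restores them. The kho_populate() hits further down read area->addr and a size from each entry, so the per-area descriptor is presumably a bare physical range. A minimal sketch under that assumption; the array and count names are also assumptions:

#include <linux/types.h>

/*
 * Shape implied by the kho_populate() loop below, which reads
 * area->addr and a size from each entry; the real definition lives
 * elsewhere in the tree and may differ.
 */
struct kho_scratch {
	phys_addr_t addr;
	phys_addr_t size;
};

/*
 * Per the comments matched above, three classes of area exist: one
 * lowmem area, one global area usable from anywhere, and one area
 * per NUMA node.  These names are assumptions, not confirmed hits.
 */
static struct kho_scratch *kho_scratch;
static unsigned int kho_scratch_cnt;
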
515 pr_notice("scratch scale is %d%%\n", scratch_scale); in kho_parse_scratch_size()
548 pr_notice("scratch areas: lowmem: %lluMiB global: %lluMiB pernode: %lldMiB\n", in kho_parse_scratch_size()
617 * reserve scratch area in low memory for lowmem allocations in the in kho_reserve_scratch()
661 pr_warn("Failed to reserve scratch area, disabling kexec handover\n"); in kho_reserve_scratch()
1444 * Mark scratch mem as CMA before we return it. That way we in kho_release_scratch()
1446 * we can reuse it as scratch memory again later. in kho_release_scratch()
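
A sketch of what the release comment above describes: pageblocks of MIGRATE_CMA type only accept movable allocations, so marking returned scratch as CMA keeps long-lived kernel data out of it and lets the range be handed over as scratch again on the next kexec. set_pageblock_migratetype() and MIGRATE_CMA are real (the latter needs CONFIG_CMA); the loop itself is an assumption:

#include <linux/mm.h>
#include <linux/pfn.h>

static void __init kho_release_scratch_sketch(void)
{
	int i;

	for (i = 0; i < kho_scratch_cnt; i++) {
		unsigned long start = PFN_UP(kho_scratch[i].addr);
		unsigned long end = PFN_DOWN(kho_scratch[i].addr +
					     kho_scratch[i].size);
		unsigned long pfn;

		/*
		 * Mark scratch mem as CMA before we return it, so only
		 * movable allocations can land there and the range can
		 * be vacated and reused as scratch later.
		 */
		for (pfn = start; pfn < end; pfn += pageblock_nr_pages)
			set_pageblock_migratetype(pfn_to_page(pfn),
						  MIGRATE_CMA);
	}
}
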
1481 struct kho_scratch *scratch = NULL; in kho_populate() local
1507 scratch = early_memremap(scratch_phys, scratch_len); in kho_populate()
1508 if (!scratch) { in kho_populate()
1509 pr_warn("setup: failed to memremap scratch (phys=0x%llx, len=%lld)\n", in kho_populate()
1521 struct kho_scratch *area = &scratch[i]; in kho_populate()
1527 pr_warn("failed to mark the scratch region 0x%pa+0x%pa: %d", in kho_populate()
1531 pr_debug("Marked 0x%pa+0x%pa as scratch", &area->addr, &size); in kho_populate()
1537 * Now that we have a viable region of scratch memory, let's tell in kho_populate()
1553 if (scratch) in kho_populate()
1554 early_memunmap(scratch, scratch_len); in kho_populate()
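
The kho_populate() hits can be reassembled into the consumer side of the handover: the previous kernel passes a physical array of kho_scratch entries, which is temporarily mapped with early_memremap(), and every entry is added to memblock and marked as scratch so early allocations land inside it. A sketch stitched from the fragments above; memblock_mark_kho_scratch() is inferred from the "failed to mark" message, and the error handling is simplified:

#include <asm/early_ioremap.h>
#include <linux/memblock.h>

void __init kho_populate_sketch(phys_addr_t scratch_phys, u64 scratch_len)
{
	struct kho_scratch *scratch = NULL;
	unsigned int scratch_cnt = scratch_len / sizeof(*scratch);
	unsigned int i;
	int err;

	/* temporarily map the descriptor array left by the old kernel */
	scratch = early_memremap(scratch_phys, scratch_len);
	if (!scratch) {
		pr_warn("setup: failed to memremap scratch (phys=0x%llx, len=%lld)\n",
			scratch_phys, scratch_len);
		return;
	}

	for (i = 0; i < scratch_cnt; i++) {
		struct kho_scratch *area = &scratch[i];
		u64 size = area->size;

		memblock_add(area->addr, size);
		err = memblock_mark_kho_scratch(area->addr, size);
		if (err) {
			pr_warn("failed to mark the scratch region 0x%pa+0x%pa: %d",
				&area->addr, &size, err);
			goto out;
		}
		pr_debug("Marked 0x%pa+0x%pa as scratch", &area->addr, &size);
	}
out:
	early_memunmap(scratch, scratch_len);
}
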
1565 struct kexec_buf scratch; in kho_fill_kimage() local
1573 scratch = (struct kexec_buf){ in kho_fill_kimage()
1583 err = kexec_add_buffer(&scratch); in kho_fill_kimage()
1586 image->kho.scratch = &image->segment[image->nr_segments - 1]; in kho_fill_kimage()
1603 /* Try to fit the kimage into our KHO scratch region */ in kho_walk_scratch()
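
Finally, the kho_walk_scratch() hit suggests how placement is constrained: each scratch area is offered to a hole-finding callback as a resource, so a loaded segment can only ever land inside scratch. A sketch assuming a callback signature like the one kexec_locate_mem_hole() walkers use; the exact loop is an assumption:

#include <linux/ioport.h>
#include <linux/kexec.h>

static int kho_walk_scratch_sketch(struct kexec_buf *kbuf,
				   int (*func)(struct resource *, void *))
{
	int ret = 0;
	int i;

	for (i = 0; i < kho_scratch_cnt; i++) {
		struct resource res = {
			.start = kho_scratch[i].addr,
			.end   = kho_scratch[i].addr +
				 kho_scratch[i].size - 1,
		};

		/* Try to fit the kimage into our KHO scratch region */
		ret = func(&res, kbuf);
		if (ret)
			break;
	}

	return ret;
}
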