Lines matching "memory region" — arch/x86/boot/compressed/kaslr.c (excerpt; leading numbers are the file's own line numbers)

1 // SPDX-License-Identifier: GPL-2.0
8 * handles walking the physical memory maps (and tracking memory regions
9 * to avoid) in order to select a physical memory location that can
41 /* Simplified build-specific string for starting entropy. */
53 hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7); in rotate_xor()
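For context, the line above is the mixing step of a short hashing loop over a memory area. A standalone reconstruction (it should match kaslr.c's rotate_xor(), but treat it as a sketch):

#include <stddef.h>

/* Rotate-and-XOR hash: rotate the accumulator by an odd number of
 * bits, then XOR in the next word of the input area. */
static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	size_t i;
	const unsigned long *ptr = area;

	for (i = 0; i < size / sizeof(hash); i++) {
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}
	return hash;
}

The "simplified build-specific string" above (line 41) is hashed this way as one of the starting entropy sources.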
82 * Store memory limit: MAXMEM on 64-bit and KERNEL_IMAGE_SIZE on 32-bit.
87 /* Number of immovable memory regions */
96 MEM_AVOID_MEMMAP_END = MEM_AVOID_MEMMAP_BEGIN + MAX_MEMMAP_REGIONS - 1,
105 if (one->start + one->size <= two->start) in mem_overlaps()
108 if (one->start >= two->start + two->size) in mem_overlaps()
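Taken together, the two early returns above make mem_overlaps() a textbook half-open interval intersection test. Filled out as standalone C (struct layout assumed from the kernel's mem_vector):

#include <stdbool.h>
#include <stdint.h>

struct mem_vector {
	uint64_t start;
	uint64_t size;
};

/* [start, start+size) ranges overlap unless one ends at or before
 * the point where the other begins. */
static bool mem_overlaps(const struct mem_vector *one,
			 const struct mem_vector *two)
{
	/* Item one is entirely before item two. */
	if (one->start + one->size <= two->start)
		return false;
	/* Item one is entirely after item two. */
	if (one->start >= two->start + two->size)
		return false;
	return true;
}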
128 return -EINVAL; in parse_memmap()
132 return -EINVAL; in parse_memmap()
137 return -EINVAL; in parse_memmap()
147 * memmap=nn@ss specifies usable region, should in parse_memmap()
156 * system can use. Region above the limit should be avoided. in parse_memmap()
162 return -EINVAL; in parse_memmap()
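These returns come from parse_memmap(), which splits a memmap=nn[KMG]@ss-style value into size and start. A hedged sketch of the suffix dispatch, assuming a kernel-style memparse() that reads "nn[KMG]" and advances the cursor (parse_memmap_suffix is a made-up name for illustration):

#include <stdint.h>

extern unsigned long long memparse(const char *ptr, char **retptr);

/* Dispatch on the character after the size: '@' usable, '#' ACPI,
 * '$' reserved, '!' protected (pmem), nothing = address limit. */
static int parse_memmap_suffix(char *p, uint64_t *start, uint64_t *size)
{
	switch (*p) {
	case '#':			/* ACPI data  */
	case '$':			/* reserved   */
	case '!':			/* protected  */
		*start = memparse(p + 1, &p);
		return 0;
	case '@':
		/* memmap=nn@ss marks usable RAM: nothing to avoid,
		 * so drop the region and fall through. */
		*size = 0;
		/* fall through */
	default:
		/* Bare memmap=nn[KMG] behaves like mem=nn[KMG]: it
		 * caps the highest address the system may use. */
		*start = 0;
		return 0;
	}
}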
186 /* Store the specified memory limit if size > 0 */ in mem_avoid_memmap()
242 len = strnlen(args, COMMAND_LINE_SIZE-1); in handle_mem_options()
256 /* Stop at -- */ in handle_mem_options()
257 if (!val && strcmp(param, "--") == 0) in handle_mem_options()
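handle_mem_options() scans the raw command line token by token until it sees a bare "--". A standalone sketch of that loop, assuming a next_arg()-style tokenizer as in the kernel's lib/cmdline.c (scan_options and the dispatch comment are illustrative):

#include <string.h>

extern char *next_arg(char *args, char **param, char **val);

/* Walk "param" / "param=val" tokens; a bare "--" ends option
 * parsing, mirroring the stop condition quoted above. */
static void scan_options(char *args)
{
	char *param, *val;

	while (*args) {
		args = next_arg(args, &param, &val);

		if (!val && strcmp(param, "--") == 0)
			break;	/* stop at "--" */

		/* ...dispatch on param: "memmap", "mem", ... */
	}
}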
284 * on 64-bit, and [16M, KERNEL_IMAGE_SIZE) on 32-bit.
291 * memory ranges lead to really hard to debug boot failures.
297 * What is not obvious is how to avoid the range of memory that is used
307 * in header.S, and the memory diagram is based on the one found in misc.c.
311 * - input + input_size >= output + output_size
312 * - kernel_total_size <= init_size
313 * - kernel_total_size <= output_size (see Note below)
314 * - output + init_size >= output + output_size
327-330 * [diagram of the output buffer, marking the boundaries output+init_size-ZO_INIT_SIZE, output+output_size and output+kernel_total_size]
332 * [output, output+init_size) is the entire memory range used for
342 * [output+init_size-ZO_INIT_SIZE, output+init_size) is the worst-case
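To make the constraints concrete, here is the avoid-range arithmetic that mem_avoid_init() performs below, with illustrative numbers (not from a real boot):

/*
 * Say output = 16 MiB, init_size = 64 MiB, and the bootloader left
 * the compressed image at input = 32 MiB. The range KASLR must not
 * touch is [input, output + init_size):
 *
 *   start = input                        = 32 MiB
 *   size  = (output + init_size) - input = 80 MiB - 32 MiB = 48 MiB
 *
 * which is exactly the MEM_AVOID_ZO_RANGE entry filled in below.
 */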
361 unsigned long init_size = boot_params_ptr->hdr.init_size; in mem_avoid_init()
366 * Avoid the region that is unsafe to overlap during in mem_avoid_init()
370 mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input; in mem_avoid_init()
373 initrd_start = (u64)boot_params_ptr->ext_ramdisk_image << 32; in mem_avoid_init()
374 initrd_start |= boot_params_ptr->hdr.ramdisk_image; in mem_avoid_init()
375 initrd_size = (u64)boot_params_ptr->ext_ramdisk_size << 32; in mem_avoid_init()
376 initrd_size |= boot_params_ptr->hdr.ramdisk_size; in mem_avoid_init()
385 cmd_line_size = strnlen((char *)cmd_line, COMMAND_LINE_SIZE-1) + 1; in mem_avoid_init()
399 /* Enumerate the immovable memory regions */ in mem_avoid_init()
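The ext_ramdisk_image/ramdisk_image pair above carries a 64-bit initrd address split across two 32-bit boot_params fields (likewise for the size). The recombination pattern in isolation (combine_hi_lo is a made-up helper name):

#include <stdint.h>

/* Rebuild a 64-bit value from the hi/lo u32 halves used by
 * boot_params, e.g. hi = ext_ramdisk_image, lo = hdr.ramdisk_image. */
static uint64_t combine_hi_lo(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}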
404 * Does this memory vector overlap a known avoided area? If so, record the
405 * overlap region with the lowest address.
412 u64 earliest = img->start + img->size; in mem_avoid_overlap()
419 earliest = overlap->start; in mem_avoid_overlap()
425 ptr = (struct setup_data *)(unsigned long)boot_params_ptr->hdr.setup_data; in mem_avoid_overlap()
430 avoid.size = sizeof(*ptr) + ptr->len; in mem_avoid_overlap()
434 earliest = overlap->start; in mem_avoid_overlap()
438 if (ptr->type == SETUP_INDIRECT && in mem_avoid_overlap()
439 ((struct setup_indirect *)ptr->data)->type != SETUP_INDIRECT) { in mem_avoid_overlap()
440 avoid.start = ((struct setup_indirect *)ptr->data)->addr; in mem_avoid_overlap()
441 avoid.size = ((struct setup_indirect *)ptr->data)->len; in mem_avoid_overlap()
445 earliest = overlap->start; in mem_avoid_overlap()
450 ptr = (struct setup_data *)(unsigned long)ptr->next; in mem_avoid_overlap()
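The loop above walks the setup_data singly linked list, treating each node's header plus payload as a candidate avoid-range and remembering the lowest overlap start. The traversal skeleton in standalone form (struct layout from the x86 boot protocol; the overlap bookkeeping is elided):

#include <stdint.h>

struct setup_data {
	uint64_t next;		/* physical address of next node */
	uint32_t type;
	uint32_t len;
	uint8_t data[];
};

static void walk_setup_data(struct setup_data *ptr)
{
	while (ptr) {
		/*
		 * avoid.start = (uintptr_t)ptr;
		 * avoid.size  = sizeof(*ptr) + ptr->len;
		 * ...check overlap, keep the lowest overlap->start...
		 */
		ptr = (struct setup_data *)(uintptr_t)ptr->next;
	}
}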
467 static void store_slot_info(struct mem_vector *region, unsigned long image_size) in store_slot_info()
474 slot_area.addr = region->start; in store_slot_info()
475 slot_area.num = 1 + (region->size - image_size) / CONFIG_PHYSICAL_ALIGN; in store_slot_info()
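The slot count above is plain division: every CONFIG_PHYSICAL_ALIGN step that still leaves image_size bytes inside the region is one candidate base address. Worked through with illustrative numbers:

/*
 * region->size = 40 MiB, image_size = 32 MiB,
 * CONFIG_PHYSICAL_ALIGN = 2 MiB:
 *
 *   num = 1 + (40 MiB - 32 MiB) / 2 MiB = 1 + 4 = 5
 *
 * i.e. offsets 0, 2, 4, 6 and 8 MiB from region->start all leave at
 * least 32 MiB for the image.
 */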
482 * Skip as many 1GB huge pages as possible in the passed region
486 process_gb_huge_pages(struct mem_vector *region, unsigned long image_size) in process_gb_huge_pages()
493 store_slot_info(region, image_size); in process_gb_huge_pages()
497 /* Are there any 1GB pages in the region? */ in process_gb_huge_pages()
498 pud_start = ALIGN(region->start, PUD_SIZE); in process_gb_huge_pages()
499 pud_end = ALIGN_DOWN(region->start + region->size, PUD_SIZE); in process_gb_huge_pages()
503 store_slot_info(region, image_size); in process_gb_huge_pages()
507 /* Check if the head part of the region is usable. */ in process_gb_huge_pages()
508 if (pud_start >= region->start + image_size) { in process_gb_huge_pages()
509 tmp.start = region->start; in process_gb_huge_pages()
510 tmp.size = pud_start - region->start; in process_gb_huge_pages()
515 gb_huge_pages = (pud_end - pud_start) >> PUD_SHIFT; in process_gb_huge_pages()
520 max_gb_huge_pages -= gb_huge_pages; in process_gb_huge_pages()
523 /* Check if the tail part of the region is usable. */ in process_gb_huge_pages()
524 if (region->start + region->size >= pud_end + image_size) { in process_gb_huge_pages()
526 tmp.size = region->start + region->size - pud_end; in process_gb_huge_pages()
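Putting the head/middle/tail logic together: whole PUD-aligned 1 GiB pages in the middle are withheld from KASLR (up to max_gb_huge_pages), and only the leftovers become slots. An illustrative region (assuming each leftover can hold image_size):

/*
 * region = [0.5 GiB, 3.5 GiB)  =>  pud_start = 1 GiB, pud_end = 3 GiB
 *
 *   head   [0.5 GiB, 1 GiB)  -> store_slot_info()
 *   middle [1 GiB,   3 GiB)  -> 2 huge pages, skipped
 *                               (if max_gb_huge_pages >= 2)
 *   tail   [3 GiB, 3.5 GiB)  -> store_slot_info()
 */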
544 slot -= slot_areas[i].num; in slots_fetch_random()
559 struct mem_vector region, overlap; in __process_mem_region()
562 /* Enforce minimum and memory limit. */ in __process_mem_region()
563 region.start = max_t(u64, entry->start, minimum); in __process_mem_region()
564 region_end = min(entry->start + entry->size, mem_limit); in __process_mem_region()
569 region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN); in __process_mem_region()
571 /* Did we raise the address above the passed in memory entry? */ in __process_mem_region()
572 if (region.start > region_end) in __process_mem_region()
576 region.size = region_end - region.start; in __process_mem_region()
578 /* Return if region can't contain decompressed kernel */ in __process_mem_region()
579 if (region.size < image_size) in __process_mem_region()
582 /* If nothing overlaps, store the region and return. */ in __process_mem_region()
583 if (!mem_avoid_overlap(&region, &overlap)) { in __process_mem_region()
584 process_gb_huge_pages(&region, image_size); in __process_mem_region()
588 /* Store beginning of region if holds at least image_size. */ in __process_mem_region()
589 if (overlap.start >= region.start + image_size) { in __process_mem_region()
590 region.size = overlap.start - region.start; in __process_mem_region()
591 process_gb_huge_pages(&region, image_size); in __process_mem_region()
594 /* Clip off the overlapping region and start over. */ in __process_mem_region()
595 region.start = overlap.start + overlap.size; in __process_mem_region()
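__process_mem_region() is thus a clip-and-retry loop: store the usable prefix in front of each overlap, then restart just past it. A condensed standalone version of that control flow (find_overlap and store_slots are hypothetical stand-ins for mem_avoid_overlap() and store_slot_info(); the CONFIG_PHYSICAL_ALIGN rounding is omitted):

#include <stdbool.h>
#include <stdint.h>

struct mem_vector { uint64_t start; uint64_t size; };

extern bool find_overlap(const struct mem_vector *r, struct mem_vector *ov);
extern void store_slots(const struct mem_vector *r);

static void clip_region(struct mem_vector region, uint64_t image_size)
{
	struct mem_vector overlap, prefix;
	uint64_t end = region.start + region.size;

	while (region.size >= image_size) {
		if (!find_overlap(&region, &overlap)) {
			store_slots(&region);	/* no conflict left */
			return;
		}
		if (overlap.start >= region.start + image_size) {
			/* Usable space in front of the overlap. */
			prefix.start = region.start;
			prefix.size = overlap.start - region.start;
			store_slots(&prefix);
		}
		if (overlap.start + overlap.size >= end)
			return;		/* nothing left after overlap */
		/* Clip off the overlap and retry with the remainder. */
		region.start = overlap.start + overlap.size;
		region.size = end - region.start;
	}
}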
599 static bool process_mem_region(struct mem_vector *region, in process_mem_region()
605 * If no immovable memory found, or MEMORY_HOTREMOVE disabled, in process_mem_region()
606 * use @region directly. in process_mem_region()
609 __process_mem_region(region, minimum, image_size); in process_mem_region()
620 * If immovable memory found, filter the intersection between in process_mem_region()
621 * immovable memory and @region. in process_mem_region()
627 if (!mem_overlaps(region, &immovable_mem[i])) in process_mem_region()
632 region_end = region->start + region->size; in process_mem_region()
634 entry.start = clamp(region->start, start, end); in process_mem_region()
636 entry.size = entry_end - entry.start; in process_mem_region()
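The clamp() calls above reduce "intersect region with an immovable range" to two one-liners. Worked through with illustrative numbers:

/*
 * region = [100 MiB, 400 MiB), immovable = [200 MiB, 300 MiB):
 *
 *   entry.start = clamp(100 MiB, 200 MiB, 300 MiB) = 200 MiB
 *   entry_end   = clamp(400 MiB, 200 MiB, 300 MiB) = 300 MiB
 *   entry.size  = 300 MiB - 200 MiB                = 100 MiB
 *
 * Only this intersection is handed to __process_mem_region(), since
 * with MEMORY_HOTREMOVE only immovable memory is a safe target.
 */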
655 * Pick free memory more conservatively than the EFI spec allows: according to
656 * the spec, EFI_BOOT_SERVICES_{CODE|DATA} are also free memory and thus
658 * where using that memory leads to crashes. Buggy vendor EFI code registers
660 * that EFI_BOOT_SERVICES_DATA memory has not been touched by loader yet, which
667 if (md->type == EFI_CONVENTIONAL_MEMORY) in memory_type_is_free()
671 md->type == EFI_UNACCEPTED_MEMORY) in memory_type_is_free()
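The predicate quoted above is a short whitelist; reconstructed in full (this should match recent mainline, but treat it as a sketch; it relies on the kernel's efi_memory_desc_t and IS_ENABLED()):

static bool memory_type_is_free(efi_memory_desc_t *md)
{
	if (md->type == EFI_CONVENTIONAL_MEMORY)
		return true;

	if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) &&
	    md->type == EFI_UNACCEPTED_MEMORY)
		return true;

	return false;
}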
684 struct efi_info *e = &boot_params_ptr->efi_info; in process_efi_entries()
686 struct mem_vector region; in process_efi_entries()
693 signature = (char *)&e->efi_loader_signature; in process_efi_entries()
700 if (e->efi_memmap_hi) { in process_efi_entries()
704 pmap = e->efi_memmap; in process_efi_entries()
706 pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32)); in process_efi_entries()
709 nr_desc = e->efi_memmap_size / e->efi_memdesc_size; in process_efi_entries()
711 md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i); in process_efi_entries()
712 if (md->attribute & EFI_MEMORY_MORE_RELIABLE) { in process_efi_entries()
719 md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i); in process_efi_entries()
725 (md->attribute & EFI_MEMORY_SP)) in process_efi_entries()
729 !(md->attribute & EFI_MEMORY_MORE_RELIABLE)) in process_efi_entries()
732 region.start = md->phys_addr; in process_efi_entries()
733 region.size = md->num_pages << EFI_PAGE_SHIFT; in process_efi_entries()
734 if (process_mem_region(&region, minimum, image_size)) in process_efi_entries()
751 struct mem_vector region; in process_e820_entries()
755 for (i = 0; i < boot_params_ptr->e820_entries; i++) { in process_e820_entries()
756 entry = &boot_params_ptr->e820_table[i]; in process_e820_entries()
757 /* Skip non-RAM entries. */ in process_e820_entries()
758 if (entry->type != E820_TYPE_RAM) in process_e820_entries()
760 region.start = entry->addr; in process_e820_entries()
761 region.size = entry->size; in process_e820_entries()
762 if (process_mem_region(&region, minimum, image_size)) in process_e820_entries()
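The e820 path mirrors the EFI one: iterate the table in boot_params, keep only E820_TYPE_RAM entries, and hand each range to process_mem_region(). A standalone sketch of the walk (consume_region is a hypothetical stand-in):

#include <stdbool.h>
#include <stdint.h>

struct boot_e820_entry {
	uint64_t addr;
	uint64_t size;
	uint32_t type;
} __attribute__((packed));

#define E820_TYPE_RAM 1

extern bool consume_region(uint64_t start, uint64_t size);

static void walk_e820(const struct boot_e820_entry *table, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		/* Skip non-RAM entries. */
		if (table[i].type != E820_TYPE_RAM)
			continue;
		/* Stop once enough slot areas have been collected. */
		if (consume_region(table[i].addr, table[i].size))
			break;
	}
}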
778 debug_putstr("Aborted memory entries scan (more than 4 memmap= args)!\n"); in find_random_phys_addr()
802 * How many CONFIG_PHYSICAL_ALIGN-sized slots
806 slots = 1 + (KERNEL_IMAGE_SIZE - minimum - image_size) / CONFIG_PHYSICAL_ALIGN; in find_random_virt_addr()
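This is the same slot arithmetic as store_slot_info(), applied to the fixed virtual window [minimum, KERNEL_IMAGE_SIZE). With illustrative numbers:

/*
 * KERNEL_IMAGE_SIZE = 1 GiB, minimum = 16 MiB, image_size = 64 MiB,
 * CONFIG_PHYSICAL_ALIGN = 2 MiB:
 *
 *   slots = 1 + (1024 - 16 - 64) MiB / 2 MiB = 1 + 472 = 473
 *
 * candidate virtual base addresses to pick from at random.
 */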
830 boot_params_ptr->hdr.loadflags |= KASLR_FLAG; in choose_random_location()
837 /* Record the various known unsafe memory ranges. */ in choose_random_location()
849 /* Walk available memory entries to find a random address. */ in choose_random_location()
852 warn("Physical KASLR disabled: no suitable memory region!"); in choose_random_location()