Lines Matching +full:high +full:- +full:end

1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 1995-2005 Russell King
24 #include <linux/dma-direct.h>
25 #include <linux/dma-map-ops.h>
40 #include <asm/kernel-pgtable.h>
50 #include <asm/xen/swiotlb-xen.h>
58 s64 memstart_addr __ro_after_init = -1;
63 * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
64 * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
65 * In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
101 bool high = false; in arch_reserve_crashkernel() local
109 &low_size, NULL, &high); in arch_reserve_crashkernel()
113 reserve_crashkernel_generic(crash_size, crash_base, low_size, high); in arch_reserve_crashkernel()
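These excerpts appear to come from the arm64 memory-init code (arch/arm64/mm/init.c). The 'high' flag threaded through lines 101-113 selects whether the crash-kernel reservation may sit above the 32-bit boundary; in that case a smaller companion region is normally kept low for 32-bit DMA. A minimal standalone sketch of that split, not kernel code: the sizes and the printed "reservations" are made up for illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_4G (4ULL << 30)

int main(void)
{
	bool high = true;                    /* e.g. a crashkernel=...,high style request */
	uint64_t crash_size = 512ULL << 20;  /* hypothetical main reservation */
	uint64_t low_size   = 128ULL << 20;  /* hypothetical low companion for DMA */

	if (high) {
		/* Main region may land above 4G; keep a small region below 4G for 32-bit DMA. */
		printf("reserve %llu MiB anywhere (may be above %#llx)\n",
		       (unsigned long long)(crash_size >> 20), (unsigned long long)SZ_4G);
		printf("plus %llu MiB below %#llx for 32-bit DMA\n",
		       (unsigned long long)(low_size >> 20), (unsigned long long)SZ_4G);
	} else {
		/* Without the high flag, the whole reservation stays below 4G. */
		printf("reserve %llu MiB below %#llx\n",
		       (unsigned long long)(crash_size >> 20), (unsigned long long)SZ_4G);
	}
	return 0;
}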
118 return min(zone_limit, memblock_end_of_DRAM() - 1) + 1; in max_zone_phys()
134 * Information we get from firmware (e.g. DT dma-ranges) describe DMA in zone_sizes_init()
136 * Some of them rely on DMA zone in low 32-bit memory. Keep low RAM in zone_sizes_init()
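The max_zone_phys() helper on line 118 clamps a DMA zone's address limit to the end of DRAM so the zone never extends past the installed memory. A minimal standalone sketch of the same arithmetic, assuming a hypothetical dram_end constant in place of memblock_end_of_DRAM() and reading the argument as an inclusive address mask (e.g. 0xffffffff for 32-bit DMA):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

/* Exclusive end of DRAM; stands in for memblock_end_of_DRAM(). */
static const phys_addr_t dram_end = 0x80000000ULL;   /* 2 GiB of RAM in this example */

static phys_addr_t max_zone_phys(phys_addr_t zone_mask)
{
	/* Take the lower of the zone's last addressable byte and the last byte of DRAM... */
	phys_addr_t last = dram_end - 1;
	phys_addr_t lim = zone_mask < last ? zone_mask : last;
	/* ...and return it as an exclusive upper bound, as the kernel helper does. */
	return lim + 1;
}

int main(void)
{
	/* With only 2 GiB of RAM, a 32-bit zone limit collapses to the end of DRAM. */
	printf("zone end: %#llx\n", (unsigned long long)max_zone_phys(0xffffffffULL));
	return 0;
}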
187 s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual); in arm64_memblock_init()
190 * Corner case: 52-bit VA capable systems running KVM in nVHE mode may in arm64_memblock_init()
212 if ((memblock_end_of_DRAM() - memstart_addr) > linear_region_size) in arm64_memblock_init()
218 * high in memory. in arm64_memblock_init()
224 memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size, in arm64_memblock_init()
230 * If we are running with a 52-bit kernel VA config on a system that in arm64_memblock_init()
232 * memory in the 48-bit addressable part of the linear region, i.e., in arm64_memblock_init()
237 memstart_addr -= _PAGE_OFFSET(vabits_actual) - _PAGE_OFFSET(52); in arm64_memblock_init()
241 * high up in memory, add back the kernel region that must be accessible in arm64_memblock_init()
246 memblock_add(__pa_symbol(_text), (resource_size_t)(_end - _text)); in arm64_memblock_init()
253 * Otherwise, this is a no-op in arm64_memblock_init()
256 resource_size_t size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base; in arm64_memblock_init()
259 * We can only add back the initrd memory if we don't end up in arm64_memblock_init()
269 "initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) { in arm64_memblock_init()
282 memblock_reserve(__pa_symbol(_text), _end - _text); in arm64_memblock_init()
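Lines 212-224 handle the case where the DRAM span is larger than the linear VA region: the base of the linear map is rounded up so that the top of DRAM remains coverable, and memory below the new base is dropped (with the kernel image and initrd added back afterwards, lines 246-282). A standalone sketch of just that clamp, assuming made-up dram_start/dram_end values and a 1 GiB stand-in for ARM64_MEMSTART_ALIGN:

#include <stdint.h>
#include <stdio.h>

#define MEMSTART_ALIGN (1ULL << 30)   /* stand-in for ARM64_MEMSTART_ALIGN */

/* Round up to a power-of-two alignment, as round_up() does in the kernel. */
static uint64_t round_up_pow2(uint64_t x, uint64_t align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	uint64_t linear_region_size = 1ULL << 38;  /* example size of the linear VA span */
	uint64_t dram_start = 0x0;
	uint64_t dram_end   = 1ULL << 39;          /* more RAM than the linear region can map */
	uint64_t memstart   = dram_start;

	if (dram_end - memstart > linear_region_size) {
		/* Keep the top of DRAM mappable; RAM below the new base is discarded. */
		memstart = round_up_pow2(dram_end - linear_region_size, MEMSTART_ALIGN);
		printf("trimming RAM below %#llx\n", (unsigned long long)memstart);
	}

	printf("linear map covers [%#llx, %#llx)\n",
	       (unsigned long long)memstart,
	       (unsigned long long)(memstart + linear_region_size));
	return 0;
}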
396 memblock_free(lm_init_begin, lm_init_end - lm_init_begin); in free_initmem()
422 * Choose a random page-aligned base address for a window of 'size' bytes which
423 * entirely contains the interval [start, end - 1].
425 static u64 __init random_bounding_box(u64 size, u64 start, u64 end) in random_bounding_box() argument
429 if ((end - start) >= size) in random_bounding_box()
432 max_pgoff = (size - (end - start)) / PAGE_SIZE; in random_bounding_box()
435 return start - pgoff * PAGE_SIZE; in random_bounding_box()
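The logic of random_bounding_box() is almost fully visible in lines 422-435: slide a window of 'size' bytes down from 'start' by a random whole number of pages while still covering [start, end - 1], or return 0 if the interval does not fit. A standalone sketch of the same computation, with rand() standing in for the kernel's random-number call purely for illustration:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SIZE 4096ULL

static uint64_t random_bounding_box(uint64_t size, uint64_t start, uint64_t end)
{
	uint64_t max_pgoff, pgoff;

	/* The interval must fit inside the window, otherwise give up. */
	if ((end - start) >= size)
		return 0;

	/* Number of page-sized steps the window can slide while still covering [start, end). */
	max_pgoff = (size - (end - start)) / PAGE_SIZE;
	pgoff = (uint64_t)rand() % (max_pgoff + 1);

	/* Slide the window down by the chosen number of pages. */
	return start - pgoff * PAGE_SIZE;
}

int main(void)
{
	srand((unsigned)time(NULL));
	/* 128M window that must contain a 4M interval starting at 1 GiB. */
	uint64_t base = random_bounding_box(128ULL << 20, 1ULL << 30,
					    (1ULL << 30) + (4ULL << 20));
	printf("window base: %#llx\n", (unsigned long long)base);
	return 0;
}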
440 * image and other modules. References using PREL32 relocations have a +/-2G
446 * CALL26/JUMP26 relocations with a +/-128M range. Without PLTs, we must ensure
462 u64 kernel_size = kernel_end - kernel_start; in module_init_limits()
473 module_direct_base = kernel_end - SZ_128M; in module_init_limits()
475 module_plt_base = kernel_end - SZ_2G; in module_init_limits()
493 pr_info("%llu pages in range for non-PLT usage", in module_init_limits()
494 module_direct_base ? (SZ_128M - kernel_size) / PAGE_SIZE : 0); in module_init_limits()
496 module_plt_base ? (SZ_2G - kernel_size) / PAGE_SIZE : 0); in module_init_limits()
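Lines 440-496 explain the two reachability budgets for modules: CALL26/JUMP26 branches reach +/-128M of the kernel image unless PLTs are used, and PREL32 relocations reach +/-2G. The fallback placement on lines 473-475 simply ends each window at the kernel image, and the pr_info lines report how many pages each window leaves free after the kernel itself. A standalone sketch of that non-randomized case, with made-up kernel addresses and image size:

#include <stdint.h>
#include <stdio.h>

#define SZ_128M   (128ULL << 20)
#define SZ_2G     (2ULL << 30)
#define PAGE_SIZE 4096ULL

int main(void)
{
	uint64_t kernel_start = 0xffff800080000000ULL;        /* hypothetical _text */
	uint64_t kernel_end   = kernel_start + (48ULL << 20); /* hypothetical _end: 48M image */
	uint64_t kernel_size  = kernel_end - kernel_start;
	uint64_t module_direct_base = 0, module_plt_base = 0;

	/* Direct branches only reach +/-128M, so the window must also contain the kernel. */
	if (kernel_size < SZ_128M)
		module_direct_base = kernel_end - SZ_128M;

	/* PREL32 relocations (via PLTs) extend the reach to +/-2G. */
	if (kernel_size < SZ_2G)
		module_plt_base = kernel_end - SZ_2G;

	printf("%llu pages in range for non-PLT usage\n",
	       (unsigned long long)(module_direct_base ?
				    (SZ_128M - kernel_size) / PAGE_SIZE : 0));
	printf("%llu pages in range for PLT usage\n",
	       (unsigned long long)(module_plt_base ?
				    (SZ_2G - kernel_size) / PAGE_SIZE : 0));
	return 0;
}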
506 unsigned long start = 0, end = 0; in execmem_arch_setup() local
516 end = module_direct_base + SZ_128M; in execmem_arch_setup()
524 end = module_plt_base + SZ_2G; in execmem_arch_setup()
531 .end = end, in execmem_arch_setup()
539 .end = VMALLOC_END, in execmem_arch_setup()
545 .end = VMALLOC_END, in execmem_arch_setup()
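Lines 506-524 show the preference order when picking the allocation window: use the 128M direct-branch window if one was found, otherwise the 2G PLT window, with the full vmalloc range (up to VMALLOC_END) serving as the last-resort fallback on lines 531-545. A standalone sketch of that selection, not kernel code; pick_window() and the base values below are hypothetical stand-ins for what module_init_limits() computes:

#include <stdint.h>
#include <stdio.h>

#define SZ_128M (128ULL << 20)
#define SZ_2G   (2ULL << 30)

static void pick_window(uint64_t module_direct_base, uint64_t module_plt_base,
			uint64_t *start, uint64_t *end)
{
	*start = 0;
	*end = 0;

	if (module_direct_base) {
		/* Preferred: every branch reaches the kernel without a PLT. */
		*start = module_direct_base;
		*end = module_direct_base + SZ_128M;
	} else if (module_plt_base) {
		/* Fallback: reachable only via PREL32 relocations and PLTs. */
		*start = module_plt_base;
		*end = module_plt_base + SZ_2G;
	}
	/* If both bases are zero, the caller falls back to the whole vmalloc area. */
}

int main(void)
{
	uint64_t start, end;

	pick_window(0xffff80007b000000ULL, 0xffff800003000000ULL, &start, &end);
	printf("module window: [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}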