Lines Matching +full:dma +full:- +full:window

1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 1995-2005 Russell King
24 #include <linux/dma-direct.h>
25 #include <linux/dma-map-ops.h>
40 #include <asm/kernel-pgtable.h>
50 #include <asm/xen/swiotlb-xen.h>
58 s64 memstart_addr __ro_after_init = -1;
63 * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
64 * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
65 * In such a case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
118 return min(zone_limit, memblock_end_of_DRAM() - 1) + 1; in max_zone_phys()
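The clamp in max_zone_phys() above caps a DMA zone's upper boundary at the end of DRAM: the boundary is the smaller of the bus limit and the last byte of RAM, plus one. A minimal userspace sketch of that arithmetic, not kernel code; the 32-bit limit and the 2 GiB of RAM below are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t phys_addr_t;

    /* zone_limit: highest byte address the bus can reach for DMA (inclusive). */
    static phys_addr_t max_zone_phys(phys_addr_t zone_limit, phys_addr_t end_of_dram)
    {
            /* Last byte of the zone is whichever comes first: bus limit or end of RAM. */
            phys_addr_t last = zone_limit < end_of_dram - 1 ? zone_limit : end_of_dram - 1;

            return last + 1;        /* exclusive boundary */
    }

    int main(void)
    {
            /* Hypothetical platform: 32-bit DMA limit, but only 2 GiB of RAM. */
            phys_addr_t dma32_limit = 0xffffffffULL;
            phys_addr_t dram_end    = 0x80000000ULL;

            printf("zone ends at %#llx\n",
                   (unsigned long long)max_zone_phys(dma32_limit, dram_end));
            return 0;
    }

With less RAM than the bus can address, the zone simply ends at the end of DRAM, which is the effect the clamp is after.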
134 * Information we get from firmware (e.g. DT dma-ranges) describes DMA in zone_sizes_init()
135 * bus constraints. Devices using DMA might have their own limitations. in zone_sizes_init()
136 * Some of them rely on a DMA zone in low 32-bit memory. Keep the DMA in zone_sizes_init()
137 * zone in low RAM on platforms that have RAM there. in zone_sizes_init()
187 s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual); in arm64_memblock_init()
190 * Corner case: 52-bit VA capable systems running KVM in nVHE mode may in arm64_memblock_init()
212 if ((memblock_end_of_DRAM() - memstart_addr) > linear_region_size) in arm64_memblock_init()
224 memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size, in arm64_memblock_init()
230 * If we are running with a 52-bit kernel VA config on a system that in arm64_memblock_init()
232 * memory in the 48-bit addressable part of the linear region, i.e., in arm64_memblock_init()
237 memstart_addr -= _PAGE_OFFSET(vabits_actual) - _PAGE_OFFSET(52); in arm64_memblock_init()
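The assignments above size and place the linear map: when DRAM extends beyond what the linear VA region can cover, memstart_addr is pushed up so only the topmost linear_region_size bytes of DRAM stay mapped. A simplified userspace sketch of that adjustment, with invented sizes and alignment (the kernel aligns to ARM64_MEMSTART_ALIGN and derives the region size from the runtime VA bits):

    #include <stdint.h>
    #include <stdio.h>

    /* Round up to a power-of-two alignment. */
    static uint64_t round_up_to(uint64_t x, uint64_t align)
    {
            return (x + align - 1) & ~(align - 1);
    }

    int main(void)
    {
            /* Invented numbers: 256 GiB linear region, 1 TiB of DRAM, 1 GiB alignment. */
            uint64_t linear_region_size = 256ULL << 30;
            uint64_t dram_end           = 1ULL << 40;
            uint64_t align              = 1ULL << 30;
            uint64_t memstart           = 0;        /* stands in for the start of DRAM */

            /* DRAM larger than the linear region: keep only its topmost part mapped. */
            if (dram_end - memstart > linear_region_size)
                    memstart = round_up_to(dram_end - linear_region_size, align);

            printf("linear map covers PA [%#llx, %#llx)\n",
                   (unsigned long long)memstart, (unsigned long long)dram_end);
            return 0;
    }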
246 memblock_add(__pa_symbol(_text), (resource_size_t)(_end - _text)); in arm64_memblock_init()
253 * Otherwise, this is a no-op in arm64_memblock_init()
256 resource_size_t size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base; in arm64_memblock_init()
269 "initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) { in arm64_memblock_init()
282 memblock_reserve(__pa_symbol(_text), _end - _text); in arm64_memblock_init()
396 memblock_free(lm_init_begin, lm_init_end - lm_init_begin); in free_initmem()
422 * Choose a random page-aligned base address for a window of 'size' bytes which
423 * entirely contains the interval [start, end - 1].
429 if ((end - start) >= size) in random_bounding_box()
432 max_pgoff = (size - (end - start)) / PAGE_SIZE; in random_bounding_box()
435 return start - pgoff * PAGE_SIZE; in random_bounding_box()
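random_bounding_box() picks a random page-aligned window base by sliding the window at most max_pgoff whole pages below 'start', where max_pgoff is the slack between the window size and the interval it must contain. A userspace sketch of the same logic; rand() stands in for the kernel's RNG, PAGE_SIZE is assumed 4 KiB, and the addresses are invented:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define PAGE_SIZE 4096ULL

    static uint64_t random_bounding_box(uint64_t size, uint64_t start, uint64_t end)
    {
            uint64_t max_pgoff, pgoff;

            /* The interval does not fit in the window: nothing to choose. */
            if (end - start >= size)
                    return 0;

            /* Whole pages the window base may slide below 'start'. */
            max_pgoff = (size - (end - start)) / PAGE_SIZE;
            pgoff = (uint64_t)rand() % (max_pgoff + 1);

            return start - pgoff * PAGE_SIZE;
    }

    int main(void)
    {
            srand((unsigned)time(NULL));

            /* Hypothetical 16 MiB image placed somewhere in a 128 MiB window. */
            uint64_t start = 0x40000000ULL, end = start + (16ULL << 20);
            uint64_t base  = random_bounding_box(128ULL << 20, start, end);

            printf("window [%#llx, %#llx) contains [%#llx, %#llx)\n",
                   (unsigned long long)base,
                   (unsigned long long)(base + (128ULL << 20)),
                   (unsigned long long)start, (unsigned long long)end);
            return 0;
    }

As long as 'start' is page-aligned, the returned base is page-aligned, and base + size is always at least 'end', so the window contains the whole interval.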
440 * image and other modules. References using PREL32 relocations have a +/-2G
442 * fall within a 2G window such that these are always within range.
446 * CALL26/JUMP26 relocations with a +/-128M range. Without PLTs, we must ensure
447 * that the entire kernel text and all module text falls within a 128M window
449 * 2G window.
462 u64 kernel_size = kernel_end - kernel_start; in module_init_limits()
473 module_direct_base = kernel_end - SZ_128M; in module_init_limits()
475 module_plt_base = kernel_end - SZ_2G; in module_init_limits()
493 pr_info("%llu pages in range for non-PLT usage", in module_init_limits()
494 module_direct_base ? (SZ_128M - kernel_size) / PAGE_SIZE : 0); in module_init_limits()
496 module_plt_base ? (SZ_2G - kernel_size) / PAGE_SIZE : 0); in module_init_limits()
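The two bases above anchor the candidate module regions at kernel_end: without PLTs, CALL26/JUMP26 branch reach confines module text to within 128M of all kernel text, while PREL32 relocations allow a 2G window. A userspace sketch of that accounting, with an invented kernel placement and size; the real limits also depend on randomization and on how much of each window actually holds usable VA space:

    #include <stdint.h>
    #include <stdio.h>

    #define SZ_128M   (128ULL << 20)
    #define SZ_2G     (2ULL << 30)
    #define PAGE_SIZE 4096ULL

    int main(void)
    {
            uint64_t kernel_start = 0xffff800080000000ULL;  /* hypothetical */
            uint64_t kernel_end   = kernel_start + (48ULL << 20);
            uint64_t kernel_size  = kernel_end - kernel_start;

            /*
             * Lowest candidate base for each window: anything below
             * kernel_end - window would leave part of the kernel out of reach.
             * A window only exists if the kernel itself fits inside it.
             */
            uint64_t module_direct_base = kernel_size < SZ_128M ? kernel_end - SZ_128M : 0;
            uint64_t module_plt_base    = kernel_size < SZ_2G   ? kernel_end - SZ_2G   : 0;

            printf("%llu pages in range for non-PLT usage\n",
                   module_direct_base ?
                   (unsigned long long)((SZ_128M - kernel_size) / PAGE_SIZE) : 0ULL);
            printf("%llu pages in range for PLT usage\n",
                   module_plt_base ?
                   (unsigned long long)((SZ_2G - kernel_size) / PAGE_SIZE) : 0ULL);
            return 0;
    }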