/* Excerpts from arch/arm64/include/asm/memory.h */

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2000-2002 Russell King
 *
 * Note: this file should not be included by non-asm/*.h files
 */
#include <asm/page-def.h>
/*
 * Size of the PCI I/O space. This must remain a power of two so that
 * IO_SPACE_LIMIT acts as a mask for the low bits of I/O addresses.
 */
/*
 * VMEMMAP_SIZE - allows the whole linear region to be covered by
 *                a struct page array
 *
 * If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE
 * needs to cover the memory region from the beginning of the 52-bit
 * PAGE_OFFSET all the way to PAGE_END for 48-bit. This allows us to
 * keep a constant PAGE_OFFSET and use only the upper end of the
 * VMEMMAP where 52-bit support is not available in hardware.
 */
#define VMEMMAP_RANGE	(_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET)
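/*
 * Worked example (a sketch, not from the header): assuming VA_BITS_MIN = 48,
 * 4 KiB pages (PAGE_SHIFT = 12) and a 64-byte struct page,
 *
 *   VMEMMAP_RANGE = _PAGE_END(48) - _PAGE_OFFSET(48)
 *                 = 0xffff800000000000 - 0xffff000000000000 = 1UL << 47
 *   VMEMMAP_SIZE  = (VMEMMAP_RANGE >> PAGE_SHIFT) * sizeof(struct page)
 *                 = (1UL << 35) * 64 = 1UL << 41   (2 TiB of struct pages)
 */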
/*
 * PAGE_OFFSET - the virtual address of the start of the linear map, at the
 *               start of the TTBR1 address space.
 * PAGE_END - the end of the linear map, where all other kernel mappings begin.
 * KIMAGE_VADDR - the virtual address of the start of the kernel image.
 * VA_BITS - the maximum number of bits for virtual addresses.
 */
#define _PAGE_OFFSET(va)	(-(UL(1) << (va)))
#define VMEMMAP_START	(VMEMMAP_END - VMEMMAP_SIZE)
#define VMEMMAP_END	(-UL(SZ_1G))
#define FIXADDR_TOP	(-UL(SZ_8M))
#define _PAGE_END(va)	(-(UL(1) << ((va) - 1)))
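/*
 * Worked example (a sketch): for va = 48,
 *   _PAGE_OFFSET(48) = -(1UL << 48) = 0xffff000000000000
 *   _PAGE_END(48)    = -(1UL << 47) = 0xffff800000000000
 * so the linear map occupies exactly the lower half of the TTBR1 range,
 * leaving the upper half for all other kernel mappings, with VMEMMAP_END
 * and FIXADDR_TOP pinned just below the top of the address space.
 */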
/*
 * Generic and Software Tag-Based KASAN modes require 1/8th and 1/16th of the
 * kernel virtual address space for the shadow region respectively. They can
 * bloat the stack significantly, so double the (minimum) stack size when
 * they are in use.
 */
#define KASAN_SHADOW_END	((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) + KASAN_SHADOW_OFFSET)
#define _KASAN_SHADOW_START(va)	(KASAN_SHADOW_END - (UL(1) << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
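/*
 * Worked example (a sketch): generic KASAN uses KASAN_SHADOW_SCALE_SHIFT = 3
 * (one shadow byte tracks 8 bytes of memory), so
 *   KASAN_SHADOW_END        = (1UL << 61) + KASAN_SHADOW_OFFSET
 *   _KASAN_SHADOW_START(48) = KASAN_SHADOW_END - (1UL << 45)
 * i.e. the shadow spans 1/8th of the 48-bit VA span (32 TiB), at a position
 * fixed by the config-provided KASAN_SHADOW_OFFSET.
 */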
#define PHYSMEM_END		__pa(PAGE_END - 1)
#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
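/*
 * Worked example (a sketch): with the default THREAD_SHIFT of 14 and 4 KiB
 * pages, THREAD_SIZE_ORDER = 14 - 12 = 2, i.e. a kernel stack is an order-2
 * allocation of 4 pages (16 KiB); the KASAN modes above double this minimum.
 */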
/*
 * Memory types for Stage-2 translation
 */
/*
 * Memory types for Stage-2 translation when ID_AA64MMFR2_EL1.FWB is 0001
 * Stage-2 enforces Normal-WB and Device-nGnRE
 */
/*
 * Open-coded (swapper_pg_dir - reserved_pg_dir) as this cannot be calculated
 * until link time.
 */

/*
 * Open-coded (swapper_pg_dir - tramp_pg_dir) as this cannot be calculated
 * until link time.
 */
#define vabits_actual		(64 - ((read_tcr() >> 16) & 63))
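/*
 * Sketch of the arithmetic: bits [21:16] of TCR_EL1 hold T1SZ, the number of
 * unused top address bits for the TTBR1 region, so the live VA width is
 * 64 - T1SZ. E.g. T1SZ = 16 gives vabits_actual = 48, T1SZ = 25 gives 39.
 */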
/* PHYS_OFFSET - the physical address of the start of memory. */

static inline u64 kaslr_offset(void)
{
	return (u64)&_text - KIMAGE_VADDR;
}
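/*
 * Note (not from the header): _text is the runtime start of the kernel
 * image, so subtracting the link-time KIMAGE_VADDR yields the KASLR
 * displacement; the result is 0 when randomization is disabled or no
 * entropy was available at boot.
 */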
/*
 * Allow all memory at the discovery stage. We will clip it later.
 */
/*
 * This is the PFN of the first RAM page in the kernel
 * direct-mapped view.  We assume this is the first page
 * in RAM in the kernel direct-mapped view.
 */
#define __is_lm_address(addr)	(((u64)(addr) - PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))

#define __lm_to_phys(addr)	(((addr) - PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr)	((addr) - kimage_voffset)
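/*
 * The __is_lm_address() test above is the standard one-comparison range
 * check: if addr is below PAGE_OFFSET, the unsigned subtraction wraps to a
 * huge value and the compare fails, so a single branch tests both bounds.
 * A minimal user-space sketch of the same idiom (in_range() is hypothetical,
 * not part of the kernel):
 */
static inline int in_range(unsigned long addr, unsigned long lo, unsigned long hi)
{
	/* true iff lo <= addr < hi, relying on unsigned wraparound */
	return (addr - lo) < (hi - lo);
}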
#define __virt_to_phys_nodebug(x) ({					\
	phys_addr_t __x = (phys_addr_t)(__tag_reset(x));		\
	__is_lm_address(__x) ? __lm_to_phys(__x) : __kimg_to_phys(__x);	\
})
#define __pa_symbol_nodebug(x)	__kimg_to_phys((phys_addr_t)(x))
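/*
 * Note (not from the header): the _symbol variant is only for addresses of
 * kernel-image symbols, which are never in the linear map, so it can skip
 * the __is_lm_address() dispatch and apply the image offset directly.
 */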
#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
#endif /* CONFIG_DEBUG_VIRTUAL */
#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
#define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))
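/*
 * Note (not from the header): the bitwise OR in __phys_to_virt() works
 * because PAGE_OFFSET has only the high bits set while (x - PHYS_OFFSET),
 * the offset into the linear map, fits entirely in the low bits, so OR and
 * ADD are equivalent here. E.g. with PAGE_OFFSET = 0xffff000000000000:
 *   0xffff000000000000 | 0x80000000 == 0xffff000080000000
 */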
/*
 * Note: Drivers should NOT use these.  They are the wrong
 * translation for translating DMA addresses.  Use the driver
 * DMA support - see dma-mapping.h.
 */
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)(__phys_to_virt(x));
}
#include <asm-generic/memory_model.h>
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __pa_symbol(x)		__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa_nodebug(x)		__virt_to_phys_nodebug((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))
/*
 * virt_to_page(x)	convert a _valid_ virtual address to struct page *
 * virt_addr_valid(x)	indicates whether a virtual address is valid
 */
#if defined(CONFIG_DEBUG_VIRTUAL)
#define page_to_virt(x)	({						\
	__typeof__(x) __page = x;					\
	void *__addr = __va(page_to_phys(__page));			\
	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
})
#define virt_to_page(x)	pfn_to_page(virt_to_pfn(x))
#else
#define page_to_virt(x)	({						\
	__typeof__(x) __page = x;					\
	u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\
	u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE);			\
	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
})
#define virt_to_page(x)	({						\
	u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE;	\
	u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page));	\
	(struct page *)__addr;						\
})
#endif /* CONFIG_DEBUG_VIRTUAL */
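/*
 * Worked example (a sketch, assuming 4 KiB pages and a 64-byte struct page):
 * for a linear-map address v, virt_to_page() computes the page index as
 * (v - PAGE_OFFSET) / 4096 and finds its struct page at
 * VMEMMAP_START + index * 64; page_to_virt() inverts the calculation by
 * recovering the index from the struct page address. The MTE/KASAN tag is
 * stripped from the input with __tag_reset() and re-applied to the output
 * with __tag_set(..., page_kasan_tag(page)).
 */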