xref: /linux/arch/arm64/include/asm/kernel-pgtable.h (revision 79790b6818e96c58fe2bffee1b418c16e64e7b80)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Kernel page table mapping
4  *
5  * Copyright (C) 2015 ARM Ltd.
6  */
7 
8 #ifndef __ASM_KERNEL_PGTABLE_H
9 #define __ASM_KERNEL_PGTABLE_H
10 
11 #include <asm/boot.h>
12 #include <asm/pgtable-hwdef.h>
13 #include <asm/sparsemem.h>
14 
15 /*
16  * The physical and virtual addresses of the start of the kernel image are
17  * equal modulo 2 MiB (per the arm64 booting.txt requirements). Hence we can
18  * use section mapping with 4K (section size = 2M) but not with 16K (section
19  * size = 32M) or 64K (section size = 512M).
20  */
21 #if defined(PMD_SIZE) && PMD_SIZE <= MIN_KIMG_ALIGN
/*
 * The kernel image alignment is at least one PMD-sized block, so the early
 * mapping can use PMD (section) mappings and skip the last table level.
 */
22 #define SWAPPER_BLOCK_SHIFT	PMD_SHIFT
23 #define SWAPPER_SKIP_LEVEL	1
24 #else
/* Section mapping not possible: map at page granularity, skip no level. */
25 #define SWAPPER_BLOCK_SHIFT	PAGE_SHIFT
26 #define SWAPPER_SKIP_LEVEL	0
27 #endif
/* Size in bytes of one early-mapping block (2^SWAPPER_BLOCK_SHIFT). */
28 #define SWAPPER_BLOCK_SIZE	(UL(1) << SWAPPER_BLOCK_SHIFT)
/*
 * Shift covered by one table entry at the level above the block level:
 * each table holds PAGE_SIZE / 8 = 2^(PAGE_SHIFT - 3) eight-byte entries.
 */
29 #define SWAPPER_TABLE_SHIFT	(SWAPPER_BLOCK_SHIFT + PAGE_SHIFT - 3)
30 
/*
 * Number of translation levels used by the early kernel mapping and by the
 * initial ID map: one fewer than the full walk when block mappings allow the
 * last level to be skipped (SWAPPER_SKIP_LEVEL above).
 *
 * Note: IDMAP_LEVELS is referenced before its #define below; this is fine,
 * as the preprocessor only expands macros at the point of use.
 */
31 #define SWAPPER_PGTABLE_LEVELS		(CONFIG_PGTABLE_LEVELS - SWAPPER_SKIP_LEVEL)
32 #define INIT_IDMAP_PGTABLE_LEVELS	(IDMAP_LEVELS - SWAPPER_SKIP_LEVEL)
33 
/*
 * The initial ID map uses a fixed 48-bit VA size regardless of the kernel's
 * configured VA size; IDMAP_ROOT_LEVEL is the walk's starting level
 * (levels are numbered so that level 3 is the page level).
 * NOTE(review): presumably 48 bits is enough to cover any ID-mapped physical
 * address on supported platforms — confirm against the boot code.
 */
34 #define IDMAP_VA_BITS		48
35 #define IDMAP_LEVELS		ARM64_HW_PGTABLE_LEVELS(IDMAP_VA_BITS)
36 #define IDMAP_ROOT_LEVEL	(4 - IDMAP_LEVELS)
37 
38 /*
39  * A relocatable kernel may execute from an address that differs from the one at
40  * which it was linked. In the worst case, its runtime placement may intersect
41  * with two adjacent PGDIR entries, which means that an additional page table
42  * may be needed at each subordinate level.
43  */
/* 1 extra table page per level for relocatable kernels, 0 otherwise. */
44 #define EXTRA_PAGE	__is_defined(CONFIG_RELOCATABLE)
45 
/*
 * Number of 2^shift-sized entries needed to cover the VA range
 * [vstart, vend): index of the last byte minus index of the first, plus one.
 */
46 #define SPAN_NR_ENTRIES(vstart, vend, shift) \
47 	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)
48 
/* As above, plus 'add' slack entries (e.g. for relocation placement). */
49 #define EARLY_ENTRIES(vstart, vend, shift, add) \
50 	(SPAN_NR_ENTRIES(vstart, vend, shift) + (add))
51 
/*
 * Number of next-level table pages needed below level 'lvl', or 0 if the
 * walk does not use that level ('lvls' levels total). The shift grows by
 * (PAGE_SHIFT - 3) per level, since each table holds 2^(PAGE_SHIFT - 3)
 * entries.
 */
52 #define EARLY_LEVEL(lvl, lvls, vstart, vend, add)	\
53 	(lvls > lvl ? EARLY_ENTRIES(vstart, vend, SWAPPER_BLOCK_SHIFT + lvl * (PAGE_SHIFT - 3), add) : 0)
54 
/*
 * Total page-table pages for an early mapping of [vstart, vend):
 * one root (PGDIR) page, plus one table page per entry at each level in use.
 */
55 #define EARLY_PAGES(lvls, vstart, vend, add) (1 	/* PGDIR page */				\
56 	+ EARLY_LEVEL(3, (lvls), (vstart), (vend), add) /* each entry needs a next level page table */	\
57 	+ EARLY_LEVEL(2, (lvls), (vstart), (vend), add)	/* each entry needs a next level page table */	\
58 	+ EARLY_LEVEL(1, (lvls), (vstart), (vend), add))/* each entry needs a next level page table */
/* Bytes reserved for init_pg_dir: kernel image [KIMAGE_VADDR, _end). */
59 #define INIT_DIR_SIZE (PAGE_SIZE * (EARLY_PAGES(SWAPPER_PGTABLE_LEVELS, KIMAGE_VADDR, _end, EXTRA_PAGE) \
60 				    + EARLY_SEGMENT_EXTRA_PAGES))
61 
/* Page-table pages / bytes for the initial ID map of the kernel image. */
62 #define INIT_IDMAP_DIR_PAGES	(EARLY_PAGES(INIT_IDMAP_PGTABLE_LEVELS, KIMAGE_VADDR, _end, 1))
63 #define INIT_IDMAP_DIR_SIZE	((INIT_IDMAP_DIR_PAGES + EARLY_IDMAP_EXTRA_PAGES) * PAGE_SIZE)
64 
/*
 * Extra table pages to also ID-map the device tree blob; the "- 1" drops the
 * PGDIR page that EARLY_PAGES always counts, presumably because the FDT
 * mapping shares the ID map's existing root page — confirm against the boot
 * code that consumes this.
 */
65 #define INIT_IDMAP_FDT_PAGES	(EARLY_PAGES(INIT_IDMAP_PGTABLE_LEVELS, 0UL, UL(MAX_FDT_SIZE), 1) - 1)
66 #define INIT_IDMAP_FDT_SIZE	((INIT_IDMAP_FDT_PAGES + EARLY_IDMAP_EXTRA_FDT_PAGES) * PAGE_SIZE)
67 
68 /* The number of segments in the kernel image (text, rodata, inittext, initdata, data+bss) */
69 #define KERNEL_SEGMENT_COUNT	5
70 
71 #if SWAPPER_BLOCK_SIZE > SEGMENT_ALIGN
/*
 * Segments are not aligned to the swapper block size, so each of the
 * KERNEL_SEGMENT_COUNT segments may straddle a block boundary and need an
 * extra table page, plus one more — NOTE(review): presumably for the image
 * as a whole spilling into one additional block; confirm against the
 * early mapping code.
 */
72 #define EARLY_SEGMENT_EXTRA_PAGES (KERNEL_SEGMENT_COUNT + 1)
73 /*
74  * The initial ID map consists of the kernel image, mapped as two separate
75  * segments, and may appear misaligned wrt the swapper block size. This means
76  * we need 3 additional pages. The DT could straddle a swapper block boundary,
77  * so it may need 2.
78  */
79 #define EARLY_IDMAP_EXTRA_PAGES		3
80 #define EARLY_IDMAP_EXTRA_FDT_PAGES	2
81 #else
/* Segments are block-aligned: no extra table pages needed anywhere. */
82 #define EARLY_SEGMENT_EXTRA_PAGES	0
83 #define EARLY_IDMAP_EXTRA_PAGES		0
84 #define EARLY_IDMAP_EXTRA_FDT_PAGES	0
85 #endif
86 
87 #endif	/* __ASM_KERNEL_PGTABLE_H */
86 
87 #endif	/* __ASM_KERNEL_PGTABLE_H */
88