xref: /linux/arch/arc/include/asm/pgtable-levels.h (revision 34f7c6e7d4396090692a09789db231e12cb4762b)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
 */

/*
 * Helpers for implementing paging levels
 */

#ifndef _ASM_ARC_PGTABLE_LEVELS_H
#define _ASM_ARC_PGTABLE_LEVELS_H

#if CONFIG_PGTABLE_LEVELS == 2

/*
 * 2 level paging setup for software walked MMUv3 (ARC700) and MMUv4 (HS)
 *
 * [31]            32 bit virtual address              [0]
 * -------------------------------------------------------
 * |               | <---------- PGDIR_SHIFT ----------> |
 * |               |                | <-- PAGE_SHIFT --> |
 * -------------------------------------------------------
 *       |                  |                |
 *       |                  |                --> off in page frame
 *       |                  ---> index into Page Table
 *       ----> index into Page Directory
 *
 * Given software walk, the vaddr split is arbitrarily set to 11:8:13
 * However enabling of super page in a 2 level regime pegs PGDIR_SHIFT to
 * super page size.
 */

#if defined(CONFIG_ARC_HUGEPAGE_16M)
#define PGDIR_SHIFT		24	/* 16M super page: PGD entry spans 2^24 bytes */
#elif defined(CONFIG_ARC_HUGEPAGE_2M)
#define PGDIR_SHIFT		21	/* 2M super page: PGD entry spans 2^21 bytes */
#else
/*
 * No Super page case
 * Default value provides 11:8:13 (8K), 10:10:12 (4K)
 * Limits imposed by pgtable_t only PAGE_SIZE long
 * (so 4K page can only have 1K entries: or 10 bits)
 */
#ifdef CONFIG_ARC_PAGE_SIZE_4K
#define PGDIR_SHIFT		22
#else
#define PGDIR_SHIFT		21
#endif

#endif

#else /* CONFIG_PGTABLE_LEVELS != 2 */

/*
 * A default 3 level paging testing setup in software walked MMU
 *   MMUv4 (8K page): <4> : <7> : <8> : <13>
 * A default 4 level paging testing setup in software walked MMU
 *   MMUv4 (8K page): <4> : <3> : <4> : <8> : <13>
 */
#define PGDIR_SHIFT		28
#if CONFIG_PGTABLE_LEVELS > 3
#define PUD_SHIFT		25
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PMD_SHIFT		21
#endif

#endif /* CONFIG_PGTABLE_LEVELS */
69 
/* 1st level: each PGD entry maps PGDIR_SIZE bytes of the 32-bit vaddr space */
#define PGDIR_SIZE		BIT(PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE - 1))
#define PTRS_PER_PGD		BIT(32 - PGDIR_SHIFT)

#if CONFIG_PGTABLE_LEVELS > 3
#define PUD_SIZE		BIT(PUD_SHIFT)
#define PUD_MASK		(~(PUD_SIZE - 1))
#define PTRS_PER_PUD		BIT(PGDIR_SHIFT - PUD_SHIFT)
#endif

#if CONFIG_PGTABLE_LEVELS > 2
#define PMD_SIZE		BIT(PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE - 1))
#define PTRS_PER_PMD		BIT(PUD_SHIFT - PMD_SHIFT)
#endif

/*
 * For folded configs PMD_SHIFT is supplied by the asm-generic nopXd headers
 * (included below); safe here since macros expand at the point of use.
 */
#define PTRS_PER_PTE		BIT(PMD_SHIFT - PAGE_SHIFT)
87 
#ifndef __ASSEMBLY__

/* Fold away the levels this config does not use via asm-generic helpers */
#if CONFIG_PGTABLE_LEVELS > 3
#include <asm-generic/pgtable-nop4d.h>
#elif CONFIG_PGTABLE_LEVELS > 2
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nopmd.h>
#endif

/*
 * 1st level paging: pgd
 */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
#define pgd_offset(mm, addr)	(((mm)->pgd) + pgd_index(addr))
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
#define pgd_ERROR(e) \
	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
106 
#if CONFIG_PGTABLE_LEVELS > 3

/* In 4 level paging, p4d_* macros work on pgd */
#define p4d_none(x)		(!p4d_val(x))
#define p4d_bad(x)		((p4d_val(x) & ~PAGE_MASK))	/* low bits must be clear */
#define p4d_present(x)		(p4d_val(x))
#define p4d_clear(xp)		do { p4d_val(*(xp)) = 0; } while (0)
#define p4d_pgtable(p4d)	((pud_t *)(p4d_val(p4d) & PAGE_MASK))
#define p4d_page(p4d)		virt_to_page(p4d_pgtable(p4d))
#define set_p4d(p4dp, p4d)	(*(p4dp) = p4d)

/*
 * 2nd level paging: pud
 */
#define pud_ERROR(e) \
	pr_crit("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))

#endif
125 
#if CONFIG_PGTABLE_LEVELS > 2

/*
 * In 3 level paging, pud_* macros work on pgd
 * In 4 level paging, pud_* macros work on pud
 */
#define pud_none(x)		(!pud_val(x))
#define pud_bad(x)		((pud_val(x) & ~PAGE_MASK))
#define pud_present(x)		(pud_val(x))
#define pud_clear(xp)		do { pud_val(*(xp)) = 0; } while (0)
#define pud_pgtable(pud)	((pmd_t *)(pud_val(pud) & PAGE_MASK))
#define pud_page(pud)		virt_to_page(pud_pgtable(pud))
#define set_pud(pudp, pud)	(*(pudp) = pud)

/*
 * 3rd level paging: pmd
 */
#define pmd_ERROR(e) \
	pr_crit("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))

/*
 * NOTE(review): pmd_pfn here masks with PMD_MASK (huge-pmd flavor), but
 * pmd_pfn is redefined unconditionally further down with PAGE_MASK. For
 * 3+ level builds that is a differing redefinition (preprocessor warning;
 * the later definition takes effect) — confirm which variant is intended.
 */
#define pmd_pfn(pmd)		((pmd_val(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#endif
151 
/*
 * Due to the strange way generic pgtable level folding works, the pmd_* macros
 *  - are valid even for 2 levels (which supposedly only has pgd - pte)
 *  - behave differently for 2 vs. 3
 * In 2  level paging        (pgd -> pte), pmd_* macros work on pgd
 * In 3+ level paging (pgd -> pmd -> pte), pmd_* macros work on pmd
 */
#define pmd_none(x)		(!pmd_val(x))
#define pmd_bad(x)		((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)		(pmd_val(x))
#define pmd_clear(xp)		do { pmd_val(*(xp)) = 0; } while (0)
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)
/*
 * NOTE(review): pmd_pfn is also defined (PMD_MASK-based) in the
 * CONFIG_PGTABLE_LEVELS > 2 section above; on 3+ level builds this later
 * PAGE_MASK-based definition wins after a redefinition warning — confirm
 * which one THP code actually needs.
 */
#define pmd_pfn(pmd)		((pmd_val(pmd) & PAGE_MASK) >> PAGE_SHIFT)
#define pmd_page(pmd)		virt_to_page(pmd_page_vaddr(pmd))
#define set_pmd(pmdp, pmd)	(*(pmdp) = pmd)
#define pmd_pgtable(pmd)	((pgtable_t) pmd_page_vaddr(pmd))
168 
/*
 * 4th level paging: pte
 * (the last level regardless of CONFIG_PGTABLE_LEVELS)
 */
#define pte_ERROR(e) \
	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))

#define pte_none(x)		(!pte_val(x))
#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,ptep)	set_pte_at(mm, addr, ptep, __pte(0))
#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define set_pte(ptep, pte)	((*(ptep)) = (pte))
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

#ifdef CONFIG_ISA_ARCV2
/* pmd maps a huge page when _PAGE_HW_SZ is set — presumably the hw
 * super-page size bit; MMUv4/ARCv2 only */
#define pmd_leaf(x)		(pmd_val(x) & _PAGE_HW_SZ)
#endif

#endif	/* !__ASSEMBLY__ */

#endif
191