// SPDX-License-Identifier: GPL-2.0
/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */

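/* Free the PTE page table that this PMD entry points to. */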
static void free_pte(struct x86_mapping_info *info, pmd_t *pmd)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);

	info->free_pgt_page(pte, info->context);
}

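/*
 * Free every PTE page table reachable from a PMD page table, then the PMD
 * page table itself. Leaf (large-page) entries have no lower-level table.
 */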
static void free_pmd(struct x86_mapping_info *info, pud_t *pud)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_present(pmd[i]))
			continue;

		if (pmd_leaf(pmd[i]))
			continue;

		free_pte(info, &pmd[i]);
	}

	info->free_pgt_page(pmd, info->context);
}

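/*
 * Free every PMD page table reachable from a PUD page table, then the PUD
 * page table itself. Leaf (1G page) entries are skipped.
 */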
static void free_pud(struct x86_mapping_info *info, p4d_t *p4d)
{
	pud_t *pud = pud_offset(p4d, 0);
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		if (!pud_present(pud[i]))
			continue;

		if (pud_leaf(pud[i]))
			continue;

		free_pmd(info, &pud[i]);
	}

	info->free_pgt_page(pud, info->context);
}

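/*
 * Free every PUD page table reachable from a P4D page table. A separate P4D
 * page only exists with 5-level paging; with 4-level paging the P4D level is
 * folded into the PGD and must not be freed here.
 */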
static void free_p4d(struct x86_mapping_info *info, pgd_t *pgd)
{
	p4d_t *p4d = p4d_offset(pgd, 0);
	int i;

	for (i = 0; i < PTRS_PER_P4D; i++) {
		if (!p4d_present(p4d[i]))
			continue;

		free_pud(info, &p4d[i]);
	}

	if (pgtable_l5_enabled())
		info->free_pgt_page(p4d, info->context);
}

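/*
 * Free a page table tree built by kernel_ident_mapping_init(), including the
 * top-level PGD page, using the caller-supplied free_pgt_page() hook.
 */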
void kernel_ident_mapping_free(struct x86_mapping_info *info, pgd_t *pgd)
{
	int i;

	for (i = 0; i < PTRS_PER_PGD; i++) {
		if (!pgd_present(pgd[i]))
			continue;

		free_p4d(info, &pgd[i]);
	}

	info->free_pgt_page(pgd, info->context);
}

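/*
 * Fill in PMD-level (2M) mappings for the region, leaving entries that are
 * already present untouched. info->offset translates the mapped virtual
 * address back to the physical address.
 */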
static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (pmd_present(*pmd))
			continue;

		set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
	}
}

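/*
 * Fill in PUD-level mappings for the region: use a 1G page where gbpages are
 * allowed and the range covers the whole PUD entry, otherwise fall back to
 * (or reuse) a PMD page table. Returns 0 on success or -ENOMEM.
 */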
static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;
		bool use_gbpage;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		/* if this is already a gbpage, this portion is already mapped */
		if (pud_leaf(*pud))
			continue;

		/* Is using a gbpage allowed? */
		use_gbpage = info->direct_gbpages;

		/* Don't use gbpage if it maps more than the requested region. */
		/* at the beginning: */
		use_gbpage &= ((addr & ~PUD_MASK) == 0);
		/* ... or at the end: */
		use_gbpage &= ((next & ~PUD_MASK) == 0);

		/* Never overwrite existing mappings */
		use_gbpage &= !pud_present(*pud);

		if (use_gbpage) {
			pud_t pudval;

			pudval = __pud((addr - info->offset) | info->page_flag);
			set_pud(pud, pudval);
			continue;
		}

		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info, pmd, addr, next);
			continue;
		}
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | info->kernpg_flag));
	}

	return 0;
}

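/*
 * Fill in P4D-level mappings for the region, allocating PUD page tables as
 * needed. Returns 0 on success or -ENOMEM.
 */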
static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;
	int result;

	for (; addr < end; addr = next) {
		p4d_t *p4d = p4d_page + p4d_index(addr);
		pud_t *pud;

		next = (addr & P4D_MASK) + P4D_SIZE;
		if (next > end)
			next = end;

		if (p4d_present(*p4d)) {
			pud = pud_offset(p4d, 0);
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;

			continue;
		}
		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;

		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;

		set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
	}

	return 0;
}

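/*
 * Build identity mappings (shifted by info->offset) for the physical range
 * [pstart, pend) into the page table rooted at pgd_page. Intermediate page
 * tables are allocated through info->alloc_pgt_page(). Returns 0 on success
 * or -ENOMEM if an allocation fails.
 */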
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend)
{
	unsigned long addr = pstart + info->offset;
	unsigned long end = pend + info->offset;
	unsigned long next;
	int result;

	/* Set the default pagetable flags if not supplied */
	if (!info->kernpg_flag)
		info->kernpg_flag = _KERNPG_TABLE;

	/* Filter out unsupported __PAGE_KERNEL_* bits: */
	info->kernpg_flag &= __default_kernel_pte_mask;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr);
		p4d_t *p4d;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			p4d = p4d_offset(pgd, 0);
			result = ident_p4d_init(info, p4d, addr, next);
			if (result)
				return result;
			continue;
		}

		p4d = (p4d_t *)info->alloc_pgt_page(info->context);
		if (!p4d)
			return -ENOMEM;
		result = ident_p4d_init(info, p4d, addr, next);
		if (result)
			return result;
		if (pgtable_l5_enabled()) {
			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag | _PAGE_NOPTISHADOW));
		} else {
			/*
			 * With p4d folded, pgd is equal to p4d.
			 * The pgd entry has to point to the pud page table in this case.
			 */
			pud_t *pud = pud_offset(p4d, 0);
			set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
		}
	}

	return 0;
}