/* xref: /linux/arch/x86/mm/ident_map.c (revision 2a6b6c9a226279b4f6668450ddb21ae655558087) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */

7 static void free_pte(struct x86_mapping_info *info, pmd_t *pmd)
8 {
9 	pte_t *pte = pte_offset_kernel(pmd, 0);
10 
11 	info->free_pgt_page(pte, info->context);
12 }
13 
14 static void free_pmd(struct x86_mapping_info *info, pud_t *pud)
15 {
16 	pmd_t *pmd = pmd_offset(pud, 0);
17 	int i;
18 
19 	for (i = 0; i < PTRS_PER_PMD; i++) {
20 		if (!pmd_present(pmd[i]))
21 			continue;
22 
23 		if (pmd_leaf(pmd[i]))
24 			continue;
25 
26 		free_pte(info, &pmd[i]);
27 	}
28 
29 	info->free_pgt_page(pmd, info->context);
30 }
31 
32 static void free_pud(struct x86_mapping_info *info, p4d_t *p4d)
33 {
34 	pud_t *pud = pud_offset(p4d, 0);
35 	int i;
36 
37 	for (i = 0; i < PTRS_PER_PUD; i++) {
38 		if (!pud_present(pud[i]))
39 			continue;
40 
41 		if (pud_leaf(pud[i]))
42 			continue;
43 
44 		free_pmd(info, &pud[i]);
45 	}
46 
47 	info->free_pgt_page(pud, info->context);
48 }
49 
50 static void free_p4d(struct x86_mapping_info *info, pgd_t *pgd)
51 {
52 	p4d_t *p4d = p4d_offset(pgd, 0);
53 	int i;
54 
55 	for (i = 0; i < PTRS_PER_P4D; i++) {
56 		if (!p4d_present(p4d[i]))
57 			continue;
58 
59 		free_pud(info, &p4d[i]);
60 	}
61 
62 	if (pgtable_l5_enabled())
63 		info->free_pgt_page(p4d, info->context);
64 }
65 
66 void kernel_ident_mapping_free(struct x86_mapping_info *info, pgd_t *pgd)
67 {
68 	int i;
69 
70 	for (i = 0; i < PTRS_PER_PGD; i++) {
71 		if (!pgd_present(pgd[i]))
72 			continue;
73 
74 		free_p4d(info, &pgd[i]);
75 	}
76 
77 	info->free_pgt_page(pgd, info->context);
78 }
79 
80 static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
81 			   unsigned long addr, unsigned long end)
82 {
83 	addr &= PMD_MASK;
84 	for (; addr < end; addr += PMD_SIZE) {
85 		pmd_t *pmd = pmd_page + pmd_index(addr);
86 
87 		if (pmd_present(*pmd))
88 			continue;
89 
90 		set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
91 	}
92 }
93 
94 static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
95 			  unsigned long addr, unsigned long end)
96 {
97 	unsigned long next;
98 
99 	for (; addr < end; addr = next) {
100 		pud_t *pud = pud_page + pud_index(addr);
101 		pmd_t *pmd;
102 
103 		next = (addr & PUD_MASK) + PUD_SIZE;
104 		if (next > end)
105 			next = end;
106 
107 		if (info->direct_gbpages) {
108 			pud_t pudval;
109 
110 			if (pud_present(*pud))
111 				continue;
112 
113 			addr &= PUD_MASK;
114 			pudval = __pud((addr - info->offset) | info->page_flag);
115 			set_pud(pud, pudval);
116 			continue;
117 		}
118 
119 		if (pud_present(*pud)) {
120 			pmd = pmd_offset(pud, 0);
121 			ident_pmd_init(info, pmd, addr, next);
122 			continue;
123 		}
124 		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
125 		if (!pmd)
126 			return -ENOMEM;
127 		ident_pmd_init(info, pmd, addr, next);
128 		set_pud(pud, __pud(__pa(pmd) | info->kernpg_flag));
129 	}
130 
131 	return 0;
132 }
133 
134 static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
135 			  unsigned long addr, unsigned long end)
136 {
137 	unsigned long next;
138 	int result;
139 
140 	for (; addr < end; addr = next) {
141 		p4d_t *p4d = p4d_page + p4d_index(addr);
142 		pud_t *pud;
143 
144 		next = (addr & P4D_MASK) + P4D_SIZE;
145 		if (next > end)
146 			next = end;
147 
148 		if (p4d_present(*p4d)) {
149 			pud = pud_offset(p4d, 0);
150 			result = ident_pud_init(info, pud, addr, next);
151 			if (result)
152 				return result;
153 
154 			continue;
155 		}
156 		pud = (pud_t *)info->alloc_pgt_page(info->context);
157 		if (!pud)
158 			return -ENOMEM;
159 
160 		result = ident_pud_init(info, pud, addr, next);
161 		if (result)
162 			return result;
163 
164 		set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
165 	}
166 
167 	return 0;
168 }
169 
170 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
171 			      unsigned long pstart, unsigned long pend)
172 {
173 	unsigned long addr = pstart + info->offset;
174 	unsigned long end = pend + info->offset;
175 	unsigned long next;
176 	int result;
177 
178 	/* Set the default pagetable flags if not supplied */
179 	if (!info->kernpg_flag)
180 		info->kernpg_flag = _KERNPG_TABLE;
181 
182 	/* Filter out unsupported __PAGE_KERNEL_* bits: */
183 	info->kernpg_flag &= __default_kernel_pte_mask;
184 
185 	for (; addr < end; addr = next) {
186 		pgd_t *pgd = pgd_page + pgd_index(addr);
187 		p4d_t *p4d;
188 
189 		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
190 		if (next > end)
191 			next = end;
192 
193 		if (pgd_present(*pgd)) {
194 			p4d = p4d_offset(pgd, 0);
195 			result = ident_p4d_init(info, p4d, addr, next);
196 			if (result)
197 				return result;
198 			continue;
199 		}
200 
201 		p4d = (p4d_t *)info->alloc_pgt_page(info->context);
202 		if (!p4d)
203 			return -ENOMEM;
204 		result = ident_p4d_init(info, p4d, addr, next);
205 		if (result)
206 			return result;
207 		if (pgtable_l5_enabled()) {
208 			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
209 		} else {
210 			/*
211 			 * With p4d folded, pgd is equal to p4d.
212 			 * The pgd entry has to point to the pud page table in this case.
213 			 */
214 			pud_t *pud = pud_offset(p4d, 0);
215 			set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag));
216 		}
217 	}
218 
219 	return 0;
220 }
221