xref: /linux/arch/x86/mm/ident_map.c (revision 36ec807b627b4c0a0a382f0ae48eac7187d14b2b)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2cb18ef0dSKees Cook /*
3cb18ef0dSKees Cook  * Helper routines for building identity mapping page tables. This is
4cb18ef0dSKees Cook  * included by both the compressed kernel and the regular kernel.
5cb18ef0dSKees Cook  */
6cf4fb15bSYinghai Lu 
7*d88e7b3eSKirill A. Shutemov static void free_pte(struct x86_mapping_info *info, pmd_t *pmd)
8*d88e7b3eSKirill A. Shutemov {
9*d88e7b3eSKirill A. Shutemov 	pte_t *pte = pte_offset_kernel(pmd, 0);
10*d88e7b3eSKirill A. Shutemov 
11*d88e7b3eSKirill A. Shutemov 	info->free_pgt_page(pte, info->context);
12*d88e7b3eSKirill A. Shutemov }
13*d88e7b3eSKirill A. Shutemov 
14*d88e7b3eSKirill A. Shutemov static void free_pmd(struct x86_mapping_info *info, pud_t *pud)
15*d88e7b3eSKirill A. Shutemov {
16*d88e7b3eSKirill A. Shutemov 	pmd_t *pmd = pmd_offset(pud, 0);
17*d88e7b3eSKirill A. Shutemov 	int i;
18*d88e7b3eSKirill A. Shutemov 
19*d88e7b3eSKirill A. Shutemov 	for (i = 0; i < PTRS_PER_PMD; i++) {
20*d88e7b3eSKirill A. Shutemov 		if (!pmd_present(pmd[i]))
21*d88e7b3eSKirill A. Shutemov 			continue;
22*d88e7b3eSKirill A. Shutemov 
23*d88e7b3eSKirill A. Shutemov 		if (pmd_leaf(pmd[i]))
24*d88e7b3eSKirill A. Shutemov 			continue;
25*d88e7b3eSKirill A. Shutemov 
26*d88e7b3eSKirill A. Shutemov 		free_pte(info, &pmd[i]);
27*d88e7b3eSKirill A. Shutemov 	}
28*d88e7b3eSKirill A. Shutemov 
29*d88e7b3eSKirill A. Shutemov 	info->free_pgt_page(pmd, info->context);
30*d88e7b3eSKirill A. Shutemov }
31*d88e7b3eSKirill A. Shutemov 
32*d88e7b3eSKirill A. Shutemov static void free_pud(struct x86_mapping_info *info, p4d_t *p4d)
33*d88e7b3eSKirill A. Shutemov {
34*d88e7b3eSKirill A. Shutemov 	pud_t *pud = pud_offset(p4d, 0);
35*d88e7b3eSKirill A. Shutemov 	int i;
36*d88e7b3eSKirill A. Shutemov 
37*d88e7b3eSKirill A. Shutemov 	for (i = 0; i < PTRS_PER_PUD; i++) {
38*d88e7b3eSKirill A. Shutemov 		if (!pud_present(pud[i]))
39*d88e7b3eSKirill A. Shutemov 			continue;
40*d88e7b3eSKirill A. Shutemov 
41*d88e7b3eSKirill A. Shutemov 		if (pud_leaf(pud[i]))
42*d88e7b3eSKirill A. Shutemov 			continue;
43*d88e7b3eSKirill A. Shutemov 
44*d88e7b3eSKirill A. Shutemov 		free_pmd(info, &pud[i]);
45*d88e7b3eSKirill A. Shutemov 	}
46*d88e7b3eSKirill A. Shutemov 
47*d88e7b3eSKirill A. Shutemov 	info->free_pgt_page(pud, info->context);
48*d88e7b3eSKirill A. Shutemov }
49*d88e7b3eSKirill A. Shutemov 
50*d88e7b3eSKirill A. Shutemov static void free_p4d(struct x86_mapping_info *info, pgd_t *pgd)
51*d88e7b3eSKirill A. Shutemov {
52*d88e7b3eSKirill A. Shutemov 	p4d_t *p4d = p4d_offset(pgd, 0);
53*d88e7b3eSKirill A. Shutemov 	int i;
54*d88e7b3eSKirill A. Shutemov 
55*d88e7b3eSKirill A. Shutemov 	for (i = 0; i < PTRS_PER_P4D; i++) {
56*d88e7b3eSKirill A. Shutemov 		if (!p4d_present(p4d[i]))
57*d88e7b3eSKirill A. Shutemov 			continue;
58*d88e7b3eSKirill A. Shutemov 
59*d88e7b3eSKirill A. Shutemov 		free_pud(info, &p4d[i]);
60*d88e7b3eSKirill A. Shutemov 	}
61*d88e7b3eSKirill A. Shutemov 
62*d88e7b3eSKirill A. Shutemov 	if (pgtable_l5_enabled())
63*d88e7b3eSKirill A. Shutemov 		info->free_pgt_page(p4d, info->context);
64*d88e7b3eSKirill A. Shutemov }
65*d88e7b3eSKirill A. Shutemov 
66*d88e7b3eSKirill A. Shutemov void kernel_ident_mapping_free(struct x86_mapping_info *info, pgd_t *pgd)
67*d88e7b3eSKirill A. Shutemov {
68*d88e7b3eSKirill A. Shutemov 	int i;
69*d88e7b3eSKirill A. Shutemov 
70*d88e7b3eSKirill A. Shutemov 	for (i = 0; i < PTRS_PER_PGD; i++) {
71*d88e7b3eSKirill A. Shutemov 		if (!pgd_present(pgd[i]))
72*d88e7b3eSKirill A. Shutemov 			continue;
73*d88e7b3eSKirill A. Shutemov 
74*d88e7b3eSKirill A. Shutemov 		free_p4d(info, &pgd[i]);
75*d88e7b3eSKirill A. Shutemov 	}
76*d88e7b3eSKirill A. Shutemov 
77*d88e7b3eSKirill A. Shutemov 	info->free_pgt_page(pgd, info->context);
78*d88e7b3eSKirill A. Shutemov }
79*d88e7b3eSKirill A. Shutemov 
80e4630fddSRafael J. Wysocki static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
81cf4fb15bSYinghai Lu 			   unsigned long addr, unsigned long end)
82cf4fb15bSYinghai Lu {
83cf4fb15bSYinghai Lu 	addr &= PMD_MASK;
84cf4fb15bSYinghai Lu 	for (; addr < end; addr += PMD_SIZE) {
85cf4fb15bSYinghai Lu 		pmd_t *pmd = pmd_page + pmd_index(addr);
86cf4fb15bSYinghai Lu 
87e4630fddSRafael J. Wysocki 		if (pmd_present(*pmd))
88e4630fddSRafael J. Wysocki 			continue;
89e4630fddSRafael J. Wysocki 
9066aad4fdSXunlei Pang 		set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
91cf4fb15bSYinghai Lu 	}
92cf4fb15bSYinghai Lu }
93cb18ef0dSKees Cook 
94cf4fb15bSYinghai Lu static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
95cf4fb15bSYinghai Lu 			  unsigned long addr, unsigned long end)
96cf4fb15bSYinghai Lu {
97cf4fb15bSYinghai Lu 	unsigned long next;
98cf4fb15bSYinghai Lu 
99cf4fb15bSYinghai Lu 	for (; addr < end; addr = next) {
100cf4fb15bSYinghai Lu 		pud_t *pud = pud_page + pud_index(addr);
101cf4fb15bSYinghai Lu 		pmd_t *pmd;
102cf4fb15bSYinghai Lu 
103cf4fb15bSYinghai Lu 		next = (addr & PUD_MASK) + PUD_SIZE;
104cf4fb15bSYinghai Lu 		if (next > end)
105cf4fb15bSYinghai Lu 			next = end;
106cf4fb15bSYinghai Lu 
107c567f294SIngo Molnar 		if (info->direct_gbpages) {
108d794734cSSteve Wahl 			pud_t pudval;
109d794734cSSteve Wahl 
110c567f294SIngo Molnar 			if (pud_present(*pud))
111c567f294SIngo Molnar 				continue;
112c567f294SIngo Molnar 
113c567f294SIngo Molnar 			addr &= PUD_MASK;
11466aad4fdSXunlei Pang 			pudval = __pud((addr - info->offset) | info->page_flag);
11566aad4fdSXunlei Pang 			set_pud(pud, pudval);
11666aad4fdSXunlei Pang 			continue;
11766aad4fdSXunlei Pang 		}
11866aad4fdSXunlei Pang 
119cf4fb15bSYinghai Lu 		if (pud_present(*pud)) {
120cf4fb15bSYinghai Lu 			pmd = pmd_offset(pud, 0);
121e4630fddSRafael J. Wysocki 			ident_pmd_init(info, pmd, addr, next);
122cf4fb15bSYinghai Lu 			continue;
123cf4fb15bSYinghai Lu 		}
124cf4fb15bSYinghai Lu 		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
125cf4fb15bSYinghai Lu 		if (!pmd)
126cf4fb15bSYinghai Lu 			return -ENOMEM;
127e4630fddSRafael J. Wysocki 		ident_pmd_init(info, pmd, addr, next);
128bba4ed01STom Lendacky 		set_pud(pud, __pud(__pa(pmd) | info->kernpg_flag));
129cf4fb15bSYinghai Lu 	}
130cf4fb15bSYinghai Lu 
131cf4fb15bSYinghai Lu 	return 0;
132cf4fb15bSYinghai Lu }
133cf4fb15bSYinghai Lu 
134ea3b5e60SKirill A. Shutemov static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
135ea3b5e60SKirill A. Shutemov 			  unsigned long addr, unsigned long end)
136ea3b5e60SKirill A. Shutemov {
137ea3b5e60SKirill A. Shutemov 	unsigned long next;
1381fcd0091SArvind Sankar 	int result;
139ea3b5e60SKirill A. Shutemov 
140ea3b5e60SKirill A. Shutemov 	for (; addr < end; addr = next) {
141ea3b5e60SKirill A. Shutemov 		p4d_t *p4d = p4d_page + p4d_index(addr);
142ea3b5e60SKirill A. Shutemov 		pud_t *pud;
143ea3b5e60SKirill A. Shutemov 
144ea3b5e60SKirill A. Shutemov 		next = (addr & P4D_MASK) + P4D_SIZE;
145ea3b5e60SKirill A. Shutemov 		if (next > end)
146ea3b5e60SKirill A. Shutemov 			next = end;
147ea3b5e60SKirill A. Shutemov 
148ea3b5e60SKirill A. Shutemov 		if (p4d_present(*p4d)) {
149ea3b5e60SKirill A. Shutemov 			pud = pud_offset(p4d, 0);
1501fcd0091SArvind Sankar 			result = ident_pud_init(info, pud, addr, next);
1511fcd0091SArvind Sankar 			if (result)
1521fcd0091SArvind Sankar 				return result;
1531fcd0091SArvind Sankar 
154ea3b5e60SKirill A. Shutemov 			continue;
155ea3b5e60SKirill A. Shutemov 		}
156ea3b5e60SKirill A. Shutemov 		pud = (pud_t *)info->alloc_pgt_page(info->context);
157ea3b5e60SKirill A. Shutemov 		if (!pud)
158ea3b5e60SKirill A. Shutemov 			return -ENOMEM;
1591fcd0091SArvind Sankar 
1601fcd0091SArvind Sankar 		result = ident_pud_init(info, pud, addr, next);
1611fcd0091SArvind Sankar 		if (result)
1621fcd0091SArvind Sankar 			return result;
1631fcd0091SArvind Sankar 
164bba4ed01STom Lendacky 		set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
165ea3b5e60SKirill A. Shutemov 	}
166ea3b5e60SKirill A. Shutemov 
167ea3b5e60SKirill A. Shutemov 	return 0;
168ea3b5e60SKirill A. Shutemov }
169ea3b5e60SKirill A. Shutemov 
/*
 * Build identity-mapping page tables covering the physical range
 * [pstart, pend), rooted at @pgd_page.  The virtual address of each
 * mapping is the physical address plus info->offset (0 for a true
 * 1:1 mapping).
 *
 * Page table pages are obtained via info->alloc_pgt_page(); existing
 * entries are extended rather than overwritten, so the function may be
 * called repeatedly on the same root to map additional ranges.
 *
 * Returns 0 on success or -ENOMEM if a page table page could not be
 * allocated.
 */
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend)
{
	unsigned long addr = pstart + info->offset;
	unsigned long end = pend + info->offset;
	unsigned long next;
	int result;

	/* Set the default pagetable flags if not supplied */
	if (!info->kernpg_flag)
		info->kernpg_flag = _KERNPG_TABLE;

	/* Filter out unsupported __PAGE_KERNEL_* bits: */
	info->kernpg_flag &= __default_kernel_pte_mask;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr);
		p4d_t *p4d;

		/* Clamp this PGD entry's span to the requested range. */
		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			/* Entry already populated: extend the existing subtree. */
			p4d = p4d_offset(pgd, 0);
			result = ident_p4d_init(info, p4d, addr, next);
			if (result)
				return result;
			continue;
		}

		p4d = (p4d_t *)info->alloc_pgt_page(info->context);
		if (!p4d)
			return -ENOMEM;
		/* Populate the new subtree before hooking it into the PGD. */
		result = ident_p4d_init(info, p4d, addr, next);
		if (result)
			return result;
		if (pgtable_l5_enabled()) {
			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
		} else {
			/*
			 * With p4d folded, pgd is equal to p4d.
			 * The pgd entry has to point to the pud page table in this case.
			 */
			pud_t *pud = pud_offset(p4d, 0);
			set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag));
		}
	}

	return 0;
}
221