xref: /linux/arch/x86/mm/ident_map.c (revision 905e46acd3272d04566fec49afbd7ad9e2ed9ae3)
/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */

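/*
 * For reference: the x86_mapping_info fields this file relies on. The
 * struct is declared in arch/x86/include/asm/init.h; the sketch below is
 * reconstructed from how the fields are used here, not copied verbatim.
 *
 *	struct x86_mapping_info {
 *		void *(*alloc_pgt_page)(void *);  allocate a zeroed page-table page
 *		void *context;                    cookie passed to alloc_pgt_page
 *		unsigned long page_flag;          flags OR'd into PMD/PUD leaf entries
 *		unsigned long offset;             virt == phys + offset for the mapping
 *		bool direct_gbpages;              use 1G pages at the PUD level
 *	};
 */
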
static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	/* Round down so every iteration starts on a PMD (2M) boundary. */
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		/* Don't overwrite an entry that is already mapped. */
		if (pmd_present(*pmd))
			continue;

		/* Map virtual 'addr' to physical 'addr - offset' as a large page. */
		set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
	}
}

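/*
 * Worked example (x86-64 figures, for orientation): PMD_SHIFT is 21, so
 * PMD_SIZE is 2M and pmd_index(addr) == (addr >> 21) & 511. With
 * info->offset == 0 and info->page_flag containing a large-page flag
 * (callers such as kexec pass __PAGE_KERNEL_LARGE_EXEC), the entry for
 * addr == 0x40000000 becomes 0x40000000 | page_flag: a 1:1 2M mapping.
 */
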
static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		/* Clamp this step to the end of the current PUD entry. */
		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		if (info->direct_gbpages) {
			pud_t pudval;

			if (pud_present(*pud))
				continue;

			/* Map the whole 1G region directly at the PUD level. */
			addr &= PUD_MASK;
			pudval = __pud((addr - info->offset) | info->page_flag);
			set_pud(pud, pudval);
			continue;
		}

		/* Reuse an existing PMD table if one is already installed. */
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info, pmd, addr, next);
			continue;
		}

		/* Otherwise allocate a fresh PMD table, fill it, then hook it up. */
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}

	return 0;
}

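/*
 * Illustrative sketch (hypothetical, not part of this file): the simplest
 * possible alloc_pgt_page callback satisfying the contract used above,
 * namely returning a zeroed page-sized table or NULL on failure. Real
 * callers, e.g. arch/x86/kernel/machine_kexec_64.c, allocate from their
 * own control-page pools instead.
 */
static void *example_alloc_pgt_page(void *context)
{
	/* get_zeroed_page() returns a zero-filled page, or 0 on failure. */
	return (void *)get_zeroed_page(GFP_KERNEL);
}
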
static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;
	int result;

	for (; addr < end; addr = next) {
		p4d_t *p4d = p4d_page + p4d_index(addr);
		pud_t *pud;

		/* Clamp this step to the end of the current P4D entry. */
		next = (addr & P4D_MASK) + P4D_SIZE;
		if (next > end)
			next = end;

		if (p4d_present(*p4d)) {
			pud = pud_offset(p4d, 0);
			/* Propagate -ENOMEM from the lower levels. */
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;
			continue;
		}
		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;
		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;
		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
	}

	return 0;
}

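/*
 * Top-level entry point: identity-map the physical range [pstart, pend)
 * through pgd_page, shifted by info->offset so that virt == phys + offset
 * (offset is 0 for a pure 1:1 mapping). With CONFIG_X86_5LEVEL disabled
 * the p4d level is folded into the pgd, which is why the bottom of the
 * loop points the pgd entry directly at the pud table.
 */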
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend)
{
	unsigned long addr = pstart + info->offset;
	unsigned long end = pend + info->offset;
	unsigned long next;
	int result;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr);
		p4d_t *p4d;

		/* Clamp this step to the end of the current PGD entry. */
		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			p4d = p4d_offset(pgd, 0);
			result = ident_p4d_init(info, p4d, addr, next);
			if (result)
				return result;
			continue;
		}

		p4d = (p4d_t *)info->alloc_pgt_page(info->context);
		if (!p4d)
			return -ENOMEM;
		result = ident_p4d_init(info, p4d, addr, next);
		if (result)
			return result;
		if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
		} else {
			/*
			 * With p4d folded, pgd is equal to p4d.
			 * The pgd entry has to point to the pud page table in this case.
			 */
			pud_t *pud = pud_offset(p4d, 0);
			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
		}
	}

	return 0;
}
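
/*
 * Illustrative usage sketch (hypothetical, not part of this file), loosely
 * modeled on the kexec caller in arch/x86/kernel/machine_kexec_64.c:
 * identity-map the physical range [start, end) into 'pgd' with 2M pages.
 */
static int example_ident_map(pgd_t *pgd, unsigned long start, unsigned long end)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= example_alloc_pgt_page,	/* hypothetical helper above */
		.context	= NULL,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,	/* 2M, executable */
		.offset		= 0,				/* pure 1:1: virt == phys */
	};

	return kernel_ident_mapping_init(&info, pgd, start, end);
}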