// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/pgd.c
 *
 *  Copyright (C) 1998-2005 Russell King
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <asm/cp15.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

#include "mm.h"

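/*
 * Level 1 table size: with LPAE the pgd holds only four 64-bit entries,
 * so a kmalloc is sufficient; classic ARM folds two 32-bit hardware
 * entries into each pgd_t and needs 2048 of them, hence the 16KiB
 * (order-2) page allocation below.
 */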
#ifdef CONFIG_ARM_LPAE
#define __pgd_alloc()	kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL)
#define __pgd_free(pgd)	kfree(pgd)
#else
#define __pgd_alloc()	(pgd_t *)__get_free_pages(GFP_KERNEL, 2)
#define __pgd_free(pgd)	free_pages((unsigned long)pgd, 2)
#endif

/*
 * Allocate a new level 1 table (16KiB on classic ARM; see
 * __pgd_alloc() above) and set up the kernel mappings in it.
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	p4d_t *new_p4d, *init_p4d;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = __pgd_alloc();
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

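	/*
	 * Make the new table visible to the hardware table walker: on
	 * cores whose walker does not snoop the D-cache, the entries
	 * must be cleaned out to memory before they can be used.
	 */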
	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
	/*
	 * Allocate a PMD table for the modules and pkmap mappings: with
	 * LPAE a pgd entry spans 1GiB, so the entry covering MODULES_VADDR
	 * also covers the top of user space and cannot simply be shared
	 * with swapper_pg_dir.
	 */
	new_p4d = p4d_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_p4d)
		goto no_p4d;

	new_pud = pud_alloc(mm, new_p4d, MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#ifdef CONFIG_KASAN
	/*
	 * Copy the PMD entries for the KASAN shadow mappings, which
	 * live between TASK_SIZE and MODULES_VADDR in this same table.
	 */
	init_pgd = pgd_offset_k(TASK_SIZE);
	init_p4d = p4d_offset(init_pgd, TASK_SIZE);
	init_pud = pud_offset(init_p4d, TASK_SIZE);
	init_pmd = pmd_offset(init_pud, TASK_SIZE);
	new_pmd = pmd_offset(new_pud, TASK_SIZE);
	memcpy(new_pmd, init_pmd,
	       (pmd_index(MODULES_VADDR) - pmd_index(TASK_SIZE))
	       * sizeof(pmd_t));
	clean_dcache_area(new_pmd, PTRS_PER_PMD * sizeof(pmd_t));
#endif /* CONFIG_KASAN */
#endif /* CONFIG_ARM_LPAE */

	if (!vectors_high()) {
		/*
		 * On ARM, the first page must always be allocated, since
		 * it contains the machine vectors. The vectors are always
		 * high with LPAE.
		 */
		new_p4d = p4d_alloc(mm, new_pgd, 0);
		if (!new_p4d)
			goto no_p4d;

		new_pud = pud_alloc(mm, new_p4d, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

#ifndef CONFIG_ARM_LPAE
		/*
		 * Give the level 1 entry pointing at this PTE table the
		 * vectors domain, so that domain access changes for user
		 * memory cannot leave the low vectors unmapped.
		 */
		pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
		pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
#endif

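		/*
		 * Wire up the vectors mapping: copy the first two PTEs
		 * from init_mm, covering the vectors page at address zero
		 * and the vector stubs page just above it.
		 */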
		init_p4d = p4d_offset(init_pgd, 0);
		init_pud = pud_offset(init_p4d, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte + 0, init_pte[0], 0);
		set_pte_ext(new_pte + 1, init_pte[1], 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

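	/*
	 * Error unwind: each label below frees the levels allocated
	 * before the corresponding failure, in reverse order.
	 */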
no_pte:
	pmd_free(mm, new_pmd);
	mm_dec_nr_pmds(mm);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	p4d_free(mm, new_p4d);
no_p4d:
	__pgd_free(new_pgd);
no_pgd:
	return NULL;
}

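/*
 * Free a page directory allocated by pgd_alloc(); called via
 * mm_free_pgd() when an mm is torn down. This unwinds the page table
 * levels backing the address-0 (low vectors) mapping and, with LPAE,
 * also frees the per-process tables not copied from swapper_pg_dir.
 */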
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

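	/*
	 * Walk down to the pte level at address 0, then fall through
	 * the labels below, freeing one table level at a time on the
	 * way back up.
	 */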
	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	p4d = p4d_offset(pgd, 0);
	if (p4d_none_or_clear_bad(p4d))
		goto no_p4d;

	pud = pud_offset(p4d, 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	mm_dec_nr_ptes(mm);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
	mm_dec_nr_pmds(mm);
no_pud:
	p4d_clear(p4d);
	pud_free(mm, pud);
no_p4d:
	pgd_clear(pgd);
	p4d_free(mm, p4d);
no_pgd:
#ifdef CONFIG_ARM_LPAE
	/*
	 * Free modules/pkmap or identity pmd tables.
	 */
	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
		if (pgd_none_or_clear_bad(pgd))
			continue;
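		/*
		 * Entries marked L_PGD_SWAPPER were copied from
		 * swapper_pg_dir and are shared with the kernel, so
		 * they must not be freed here.
		 */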
		if (pgd_val(*pgd) & L_PGD_SWAPPER)
			continue;
		p4d = p4d_offset(pgd, 0);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pud = pud_offset(p4d, 0);
		if (pud_none_or_clear_bad(pud))
			continue;
		pmd = pmd_offset(pud, 0);
		pud_clear(pud);
		pmd_free(mm, pmd);
		mm_dec_nr_pmds(mm);
		p4d_clear(p4d);
		pud_free(mm, pud);
		mm_dec_nr_puds(mm);
		pgd_clear(pgd);
		p4d_free(mm, p4d);
	}
#endif
	__pgd_free(pgd_base);
}