// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <as-layout.h>
#include <init.h>
#include <kern.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>
#include <um_malloc.h>
#include <linux/sched/task.h>
#include <linux/kasan.h>

#ifdef CONFIG_KASAN
void __init kasan_init(void)
{
	/*
	 * kasan_map_memory will map all of the required address space and
	 * the host machine will allocate physical memory as necessary.
	 */
	kasan_map_memory((void *)KASAN_SHADOW_START, KASAN_SHADOW_SIZE);
	init_task.kasan_depth = 0;
	/*
	 * Since kasan_init() is called before main(), KASAN is initialized
	 * here, but enabling it is deferred until after jump_label_init().
	 * See arch_mm_preinit().
	 */
}
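
/*
 * For orientation only: kasan_map_memory() is provided by the host-side
 * (os-Linux) layer, which is expected to reserve the shadow region with an
 * overcommitted anonymous mapping so the host only materializes pages on
 * first touch, roughly along the lines of
 *
 *	mmap(start, len, PROT_READ | PROT_WRITE,
 *	     MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
 *
 * See the os-Linux implementation for the authoritative behaviour.
 */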
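/*
 * The pointer below is placed in the dedicated .kasan_init section so that
 * it can be picked up and called before main() runs (see the comment in
 * kasan_init() above).
 */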
static void (*kasan_init_ptr)(void)
	__section(".kasan_init") __used
	= kasan_init;
#endif

/* allocated in paging_init, zeroed in arch_mm_preinit, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);

/*
 * Initialized during boot, and read-only for initializing page tables
 * afterwards
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and read-only after that */
int kmalloc_ok = 0;

/* Used during early boot */
static unsigned long brk_end;

void __init arch_mm_preinit(void)
{
	/* Safe to call after jump_label_init(). Enables KASAN. */
	kasan_init_generic();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Map in the area just after the brk now that kmalloc is about
	 * to be turned on.
	 */
	brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
	memblock_free((void *)brk_end, uml_reserved - brk_end);
	uml_reserved = brk_end;
	min_low_pfn = PFN_UP(__pa(uml_reserved));
	max_pfn = max_low_pfn;
}

void __init mem_init(void)
{
	kmalloc_ok = 1;
}

#if IS_ENABLED(CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA)
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
							  PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		set_pmd(pmd, __pmd(_KERNPG_TABLE +
				   (unsigned long) __pa(pte)));
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}
}

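/*
 * Same idea one level up: allocate a pmd table and hook it into the given
 * pud entry. With two-level page tables this compiles away entirely.
 */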
static void __init one_md_table_init(pud_t *pud)
{
#if CONFIG_PGTABLE_LEVELS > 2
	pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);

	if (!pmd_table)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
	BUG_ON(pmd_table != pmd_offset(pud, 0));
#endif
}

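/*
 * And one more level up: allocate a pud table and hook it into the given
 * p4d entry. Only needed when four-level page tables are in use.
 */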
static void __init one_ud_table_init(p4d_t *p4d)
{
#if CONFIG_PGTABLE_LEVELS > 3
	pud_t *pud_table = (pud_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);

	if (!pud_table)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	set_p4d(p4d, __p4d(_KERNPG_TABLE + (unsigned long) __pa(pud_table)));
	BUG_ON(pud_table != pud_offset(p4d, 0));
#endif
}

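/*
 * Walk [start, end) through the page-table hierarchy rooted at pgd_base,
 * allocating any missing intermediate tables along the way. Only the
 * tables themselves are created here; the leaf ptes are filled in by the
 * caller.
 */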
static void __init fixrange_init(unsigned long start, unsigned long end,
				 pgd_t *pgd_base)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		p4d = p4d_offset(pgd, vaddr);
		if (p4d_none(*p4d))
			one_ud_table_init(p4d);
		pud = pud_offset(p4d, vaddr);
		if (pud_none(*pud))
			one_md_table_init(pud);
		pmd = pmd_offset(pud, vaddr);
		for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
			one_page_table_init(pmd);
			vaddr += PMD_SIZE;
		}
		j = 0;
	}
}

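/*
 * Reuse the host's vsyscall area: take a private copy of its contents,
 * build page tables covering the range with fixrange_init(), and map the
 * copy read-only at the same fixed addresses, so processes see a stable
 * snapshot rather than the host's live mapping.
 */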
static void __init fixaddr_user_init(void)
{
	long size = FIXADDR_USER_END - FIXADDR_USER_START;
	pte_t *pte;
	phys_t p;
	unsigned long v, vaddr = FIXADDR_USER_START;

	if (!size)
		return;

	fixrange_init(FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
	v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
	if (!v)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, size, PAGE_SIZE);

	memcpy((void *) v, (void *) FIXADDR_USER_START, size);
	p = __pa(v);
	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
	      p += PAGE_SIZE) {
		pte = virt_to_kpte(vaddr);
		pte_set_val(*pte, p, PAGE_READONLY);
	}
}
#endif

void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
							       PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	max_zone_pfn[ZONE_NORMAL] = end_iomem >> PAGE_SHIFT;
	free_area_init(max_zone_pfn);

#if IS_ENABLED(CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA)
	fixaddr_user_init();
#endif
}

/*
 * This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */
void free_initmem(void)
{
}

/* Allocate and free page tables. */

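/*
 * A fresh pgd gets the kernel half copied from swapper_pg_dir, so the
 * kernel mappings (including the fixmap area set up above) are visible in
 * every address space; the user half starts out empty.
 */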
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = __pgd_alloc(mm, 0);

	if (pgd)
		memcpy(pgd + USER_PTRS_PER_PGD,
		       swapper_pg_dir + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	return pgd;
}

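/*
 * Thin wrapper so that the userspace (os-Linux) side of UML, which cannot
 * include kernel slab headers directly, can still allocate kernel memory.
 */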
void *uml_kmalloc(int size, int flags)
{
	return kmalloc(size, flags);
}

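/*
 * vm_get_page_prot() table. Note that plain VM_WRITE (without VM_SHARED)
 * maps to PAGE_COPY: private writable mappings start out write-protected,
 * so the first write faults and triggers copy-on-write.
 */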
static const pgprot_t protection_map[16] = {
	[VM_NONE] = PAGE_NONE,
	[VM_READ] = PAGE_READONLY,
	[VM_WRITE] = PAGE_COPY,
	[VM_WRITE | VM_READ] = PAGE_COPY,
	[VM_EXEC] = PAGE_READONLY,
	[VM_EXEC | VM_READ] = PAGE_READONLY,
	[VM_EXEC | VM_WRITE] = PAGE_COPY,
	[VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY,
	[VM_SHARED] = PAGE_NONE,
	[VM_SHARED | VM_READ] = PAGE_READONLY,
	[VM_SHARED | VM_WRITE] = PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
	[VM_SHARED | VM_EXEC] = PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED
};
DECLARE_VM_GET_PAGE_PROT

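/*
 * Ask the host to write-protect the kernel's rodata; os_protect_memory()
 * is the os-Linux wrapper around the host's mprotect().
 */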
void mark_rodata_ro(void)
{
	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
	unsigned long rodata_end = PFN_ALIGN(__end_rodata);

	os_protect_memory((void *)rodata_start, rodata_end - rodata_start, 1, 0, 0);
}