xref: /linux/arch/um/kernel/mem.c (revision e20706d5385b10a6f6a2fe5ad6b1333dad2d1416)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <as-layout.h>
#include <init.h>
#include <kern.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>
#include <um_malloc.h>
#include <linux/sched/task.h>

#ifdef CONFIG_KASAN
int kasan_um_is_ready;
void kasan_init(void)
{
	/*
	 * kasan_map_memory will map all of the required address space and
	 * the host machine will allocate physical memory as necessary.
	 */
	kasan_map_memory((void *)KASAN_SHADOW_START, KASAN_SHADOW_SIZE);
	init_task.kasan_depth = 0;
	kasan_um_is_ready = true;
}

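/*
 * Placed in its own section so the linker script can pull it into the
 * early constructor table (.init_array); kasan_init() then runs as a
 * constructor of the host process, before any instrumented code executes.
 */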
static void (*kasan_init_ptr)(void)
__section(".kasan_init") __used
= kasan_init;
#endif

/* allocated in paging_init, zeroed in arch_mm_preinit, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);

/*
 * Initialized during boot, and read-only afterwards; used as the source of
 * the kernel mappings when initializing new page tables.
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and read-only after that */
int kmalloc_ok = 0;

/* Used during early boot */
static unsigned long brk_end;

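/*
 * Runs before mem_init(): clear the shared zero page, map the gap between
 * the current brk and the end of the early reserved area, and hand that
 * range back to memblock so it becomes usable low memory.
 */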
void __init arch_mm_preinit(void)
{
	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Map in the area just after the brk now that kmalloc is about
	 * to be turned on.
	 */
	brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
	memblock_free((void *)brk_end, uml_reserved - brk_end);
	uml_reserved = brk_end;
	max_pfn = max_low_pfn;
}

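/*
 * Everything else was set up in arch_mm_preinit(); all that is left here
 * is to signal that kmalloc() may be used from now on.
 */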
void __init mem_init(void)
{
	kmalloc_ok = 1;
}

#if IS_ENABLED(CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA)
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
							  PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		set_pmd(pmd, __pmd(_KERNPG_TABLE +
				   (unsigned long) __pa(pte)));
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}
}

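/*
 * Create a pmd table and install it in the given pud entry; a no-op when
 * the configuration has only two page table levels.
 */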
static void __init one_md_table_init(pud_t *pud)
{
#if CONFIG_PGTABLE_LEVELS > 2
	pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);

	if (!pmd_table)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
	BUG_ON(pmd_table != pmd_offset(pud, 0));
#endif
}

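/*
 * Create a pud table and install it in the given p4d entry; a no-op unless
 * the configuration has more than three page table levels.
 */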
static void __init one_ud_table_init(p4d_t *p4d)
{
#if CONFIG_PGTABLE_LEVELS > 3
	pud_t *pud_table = (pud_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);

	if (!pud_table)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	set_p4d(p4d, __p4d(_KERNPG_TABLE + (unsigned long) __pa(pud_table)));
	BUG_ON(pud_table != pud_offset(p4d, 0));
#endif
}

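/*
 * Walk [start, end) under pgd_base and allocate any missing intermediate
 * tables (p4d/pud/pmd and the pte pages), so the fixed-address range can
 * be populated afterwards.
 */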
static void __init fixrange_init(unsigned long start, unsigned long end,
				 pgd_t *pgd_base)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		p4d = p4d_offset(pgd, vaddr);
		if (p4d_none(*p4d))
			one_ud_table_init(p4d);
		pud = pud_offset(p4d, vaddr);
		if (pud_none(*pud))
			one_md_table_init(pud);
		pmd = pmd_offset(pud, vaddr);
		for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
			one_page_table_init(pmd);
			vaddr += PMD_SIZE;
		}
		j = 0;
	}
}

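/*
 * Make a private, read-only copy of the host's user fixmap (vsyscall) area:
 * allocate backing pages, copy FIXADDR_USER_START..FIXADDR_USER_END into
 * them, and point the kernel ptes for that range at the copy.
 */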
static void __init fixaddr_user_init(void)
{
	long size = FIXADDR_USER_END - FIXADDR_USER_START;
	pte_t *pte;
	phys_t p;
	unsigned long v, vaddr = FIXADDR_USER_START;

	if (!size)
		return;

	fixrange_init(FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
	v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
	if (!v)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, size, PAGE_SIZE);

	memcpy((void *) v, (void *) FIXADDR_USER_START, size);
	p = __pa(v);
	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
		      p += PAGE_SIZE) {
		pte = virt_to_kpte(vaddr);
		pte_set_val(*pte, p, PAGE_READONLY);
	}
}
#endif

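/*
 * Allocate the shared zero page and describe the single ZONE_NORMAL zone to
 * the core mm; when the host vsyscall area is reused, also set up its
 * read-only copy in the user fixmap range.
 */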
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
							       PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	max_zone_pfn[ZONE_NORMAL] = end_iomem >> PAGE_SHIFT;
	free_area_init(max_zone_pfn);

#if IS_ENABLED(CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA)
	fixaddr_user_init();
#endif
}

/*
 * This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */
void free_initmem(void)
{
}

/* Allocate and free page tables. */

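/*
 * Allocate a pgd for a new mm and copy in the kernel half of
 * swapper_pg_dir; the user entries start out empty.
 */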
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = __pgd_alloc(mm, 0);

	if (pgd)
		memcpy(pgd + USER_PTRS_PER_PGD,
		       swapper_pg_dir + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	return pgd;
}

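/*
 * Thin wrapper so the userspace (os-Linux) side of UML, which is built
 * against libc headers rather than kernel headers, can still allocate
 * kernel memory.
 */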
void *uml_kmalloc(int size, int flags)
{
	return kmalloc(size, flags);
}

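/*
 * Map VM_READ/VM_WRITE/VM_EXEC/VM_SHARED combinations to page protections.
 * Private writable mappings get PAGE_COPY so they fault and are copied on
 * write; DECLARE_VM_GET_PAGE_PROT below emits the generic
 * vm_get_page_prot() that indexes this table.
 */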
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY,
	[VM_EXEC]					= PAGE_READONLY,
	[VM_EXEC | VM_READ]				= PAGE_READONLY,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED
};
DECLARE_VM_GET_PAGE_PROT