xref: /linux/arch/um/kernel/mem.c (revision 6a34dfa15d6edf7e78b8118d862d2db0889cf669)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched/task.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <as-layout.h>
#include <init.h>
#include <kern.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>
#include <um_malloc.h>

#ifdef CONFIG_KASAN
int kasan_um_is_ready;
void kasan_init(void)
{
	/*
	 * kasan_map_memory will map all of the required address space and
	 * the host machine will allocate physical memory as necessary.
	 */
	kasan_map_memory((void *)KASAN_SHADOW_START, KASAN_SHADOW_SIZE);
	init_task.kasan_depth = 0;
	kasan_um_is_ready = true;
}

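/*
 * Stash a pointer to kasan_init() in the dedicated .kasan_init section.
 * As far as I can tell, the UML linker scripts place this section so that
 * the function runs as an early constructor, before any KASAN-instrumented
 * kernel code executes.
 */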
static void (*kasan_init_ptr)(void)
__section(".kasan_init") __used
= kasan_init;
#endif

/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);

/*
 * Initialized during boot; used read-only afterwards when setting up new
 * page tables
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and read-only after that */
int kmalloc_ok = 0;

/* Used during early boot */
static unsigned long brk_end;

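/*
 * Called once at boot: map the region between the current brk and the end
 * of the early reservation, give it back to memblock, hand all low memory
 * to the page allocator, and finally mark kmalloc() as usable.
 */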
void __init mem_init(void)
{
	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Map in the area just after the brk now that kmalloc is about
	 * to be turned on.
	 */
	brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
	memblock_free((void *)brk_end, uml_reserved - brk_end);
	uml_reserved = brk_end;

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	max_pfn = max_low_pfn;
	kmalloc_ok = 1;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
							  PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		set_pmd(pmd, __pmd(_KERNPG_TABLE +
				   (unsigned long) __pa(pte)));
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}
}

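/*
 * Install a freshly allocated PMD page in a pud entry.  With two-level
 * page tables the PMD level is folded and there is nothing to do.
 */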
static void __init one_md_table_init(pud_t *pud)
{
#if CONFIG_PGTABLE_LEVELS > 2
	pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!pmd_table)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
	BUG_ON(pmd_table != pmd_offset(pud, 0));
#endif
}

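/*
 * Likewise, install a freshly allocated PUD page in a p4d entry; only
 * needed when the build uses more than three page-table levels.
 */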
static void __init one_ud_table_init(p4d_t *p4d)
{
#if CONFIG_PGTABLE_LEVELS > 3
	pud_t *pud_table = (pud_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!pud_table)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	set_p4d(p4d, __p4d(_KERNPG_TABLE + (unsigned long) __pa(pud_table)));
	BUG_ON(pud_table != pud_offset(p4d, 0));
#endif
}

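/*
 * Walk [start, end) in PMD-sized steps under pgd_base and make sure every
 * intermediate page-table page down to the PTE level exists.  Only the
 * skeleton is built here; the PTE entries themselves are filled in later,
 * e.g. by set_fixmap().
 */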
static void __init fixrange_init(unsigned long start, unsigned long end,
				 pgd_t *pgd_base)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		p4d = p4d_offset(pgd, vaddr);
		if (p4d_none(*p4d))
			one_ud_table_init(p4d);
		pud = pud_offset(p4d, vaddr);
		if (pud_none(*pud))
			one_md_table_init(pud);
		pmd = pmd_offset(pud, vaddr);
		for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
			one_page_table_init(pmd);
			vaddr += PMD_SIZE;
		}
		j = 0;
	}
}

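/*
 * When the host's vsyscall area is reused, make a private copy of its
 * contents and map that copy read-only at the same fixed user addresses,
 * presumably so every process sees stable contents without touching the
 * host page itself.
 */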
static void __init fixaddr_user_init(void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
	long size = FIXADDR_USER_END - FIXADDR_USER_START;
	pte_t *pte;
	phys_t p;
	unsigned long v, vaddr = FIXADDR_USER_START;

	if (!size)
		return;

	fixrange_init(FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
	v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
	if (!v)
		panic("%s: Failed to allocate %ld bytes align=%lx\n",
		      __func__, size, PAGE_SIZE);

	memcpy((void *) v, (void *) FIXADDR_USER_START, size);
	p = __pa(v);
	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
		      p += PAGE_SIZE) {
		pte = virt_to_kpte(vaddr);
		pte_set_val(*pte, p, PAGE_READONLY);
	}
#endif
}

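/*
 * Boot-time paging setup: allocate the shared zero page, register the
 * single NORMAL zone with the allocator, and build the page-table
 * skeleton for the fixmap area.
 */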
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
	unsigned long vaddr;

	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
							       PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	max_zone_pfn[ZONE_NORMAL] = end_iomem >> PAGE_SHIFT;
	free_area_init(max_zone_pfn);

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

	fixaddr_user_init();
}

/*
 * This is a no-op: nothing in the kernel image can be freed, because the
 * UML kernel image does not live in the kernel's physical memory.
 */
void free_initmem(void)
{
}

/* Allocate and free page tables. */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

	if (pgd) {
		/* clear the user range of the new page directory ... */
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		/* ... and copy the kernel mappings from swapper_pg_dir */
		memcpy(pgd + USER_PTRS_PER_PGD,
		       swapper_pg_dir + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}

/*
 * kmalloc() wrapper for the user-side (os-Linux) code, which cannot pull
 * in the kernel headers that declare kmalloc() directly.
 */
void *uml_kmalloc(int size, int flags)
{
	return kmalloc(size, flags);
}

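/*
 * Translate VM_* protection-flag combinations into page protections.
 * Private writable mappings get the write-protected PAGE_COPY so that the
 * first write faults and triggers copy-on-write; only shared writable
 * mappings get PAGE_SHARED.  DECLARE_VM_GET_PAGE_PROT below emits the
 * generic vm_get_page_prot() that indexes this table.
 */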
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY,
	[VM_EXEC]					= PAGE_READONLY,
	[VM_EXEC | VM_READ]				= PAGE_READONLY,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED
};
DECLARE_VM_GET_PAGE_PROT