// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <as-layout.h>
#include <init.h>
#include <kern.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>

/* Allocated in paging_init, zeroed in mem_init, and unchanged thereafter. */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);

/*
 * Initialized during boot, and read-only afterwards; used as a template
 * when new page tables are initialized (see pgd_alloc() below).
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and read-only after that */
unsigned long long highmem;
EXPORT_SYMBOL(highmem);

/* Set to 1 once mem_init() has run and kmalloc() is usable. */
int kmalloc_ok = 0;

/* Used during early boot */
static unsigned long brk_end;

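/*
 * mem_init() - hand boot memory over to the kernel proper.
 *
 * Zeroes the zero page, maps the region between the (page-aligned) host
 * brk and uml_reserved and returns it to memblock, releases all low
 * memory to the buddy allocator, and finally flags kmalloc() as usable.
 */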
void __init mem_init(void)
{
	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Map in the area just after the brk now that kmalloc is about
	 * to be turned on.  UML_ROUND_UP() (from as-layout.h) page-aligns
	 * the host's current brk before the region [brk_end, uml_reserved)
	 * is mapped and given back to memblock.
	 */
	brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
	memblock_free(__pa(brk_end), uml_reserved - brk_end);
	uml_reserved = brk_end;

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	max_low_pfn = totalram_pages();
	max_pfn = max_low_pfn;
	kmalloc_ok = 1;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
							  PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		set_pmd(pmd, __pmd(_KERNPG_TABLE +
				   (unsigned long) __pa(pte)));
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}
}

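/*
 * Create a page middle directory and install it in the given page upper
 * directory entry.  With two-level page tables the pmd is folded into
 * the pud, so this is a no-op.
 */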
static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
	pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);

	if (!pmd_table)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
	BUG_ON(pmd_table != pmd_offset(pud, 0));
#endif
}

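/*
 * Walk [start, end) in PMD-sized steps, allocating any missing pmd and
 * pte pages under pgd_base.  Only the page-table skeleton is created;
 * leaf PTEs are filled in later (e.g. by set_fixmap()).
 */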
static void __init fixrange_init(unsigned long start, unsigned long end,
				 pgd_t *pgd_base)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		p4d = p4d_offset(pgd, vaddr);
		pud = pud_offset(p4d, vaddr);
		if (pud_none(*pud))
			one_md_table_init(pud);
		pmd = pmd_offset(pud, vaddr);
		for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
			one_page_table_init(pmd);
			vaddr += PMD_SIZE;
		}
		j = 0;
	}
}

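/*
 * With CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA, take a private copy of the
 * host's [FIXADDR_USER_START, FIXADDR_USER_END) region and map that copy
 * back at the same virtual addresses through read-only PTEs.
 */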
static void __init fixaddr_user_init(void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
	long size = FIXADDR_USER_END - FIXADDR_USER_START;
	pte_t *pte;
	phys_t p;
	unsigned long v, vaddr = FIXADDR_USER_START;

	if (!size)
		return;

	fixrange_init(FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
	v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
	if (!v)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, size, PAGE_SIZE);

	memcpy((void *) v, (void *) FIXADDR_USER_START, size);
	p = __pa(v);
	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
			  p += PAGE_SIZE) {
		pte = virt_to_kpte(vaddr);
		pte_set_val(*pte, p, PAGE_READONLY);
	}
#endif
}

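/*
 * paging_init() - build the initial kernel page tables.
 *
 * Allocates the zero page, feeds the zone layout to free_area_init()
 * (everything below end_iomem is ZONE_NORMAL), and creates the
 * page-table skeleton for the fixmap range and, via fixaddr_user_init(),
 * the host vsyscall range.
 */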
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
	unsigned long vaddr;

	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
							       PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	max_zone_pfn[ZONE_NORMAL] = end_iomem >> PAGE_SHIFT;
	free_area_init(max_zone_pfn);

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

	fixaddr_user_init();
}

/*
 * This can't free anything: on UML the kernel image lives in host memory
 * that was never part of the kernel's physical memory, so the usual
 * reclaiming of init sections is impossible.
 */
void free_initmem(void)
{
}

/* Allocate and free page tables. */

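/*
 * Allocate a pgd for a new address space: the user part is cleared, the
 * kernel part is copied from swapper_pg_dir so all processes share the
 * kernel mappings.
 *
 * Usage sketch (illustrative only, not copied from the actual caller in
 * kernel/fork.c):
 *
 *	mm->pgd = pgd_alloc(mm);
 *	if (!mm->pgd)
 *		return -ENOMEM;
 */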
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

	if (pgd) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD,
		       swapper_pg_dir + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}

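/*
 * kmalloc() wrapper for UML's userspace (os-Linux) side, which is built
 * against libc rather than kernel headers and so cannot call kmalloc()
 * directly.
 */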
void *uml_kmalloc(int size, int flags)
{
	return kmalloc(size, flags);
}
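
/*
 * Usage sketch (hypothetical caller): user-side code passes the UM_GFP_*
 * flags from os.h instead of the kernel GFP flags, e.g.
 *
 *	void *buf = uml_kmalloc(len, UM_GFP_KERNEL);
 *	if (buf == NULL)
 *		return -ENOMEM;
 */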