xref: /linux/arch/um/kernel/mem.c (revision b7019ac550eb3916f34d79db583e9b7ea2524afa)
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <as-layout.h>
#include <init.h>
#include <kern.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>

/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);

/*
 * Initialized during boot, and read-only after that; serves as the
 * template for the kernel half of newly created page tables.
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and read-only after that */
unsigned long long highmem;
int kmalloc_ok = 0;

/* Used during early boot */
static unsigned long brk_end;

void __init mem_init(void)
{
	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Map in the area just after the brk now that kmalloc is about
	 * to be turned on.
	 */
	brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
	memblock_free(__pa(brk_end), uml_reserved - brk_end);
	uml_reserved = brk_end;

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	max_low_pfn = totalram_pages();
	max_pfn = max_low_pfn;
	mem_init_print_info(NULL);
	kmalloc_ok = 1;
}
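
/*
 * Editorial sketch, not part of the original file: UML_ROUND_UP (from
 * as-layout.h) is assumed here to round an address up to the next page
 * boundary, so with 4 KiB pages the hand-over above behaves like
 *
 *	brk_end = (sbrk(0) + PAGE_SIZE - 1) & PAGE_MASK;
 *
 * mem_init() maps the still-reserved window [brk_end, uml_reserved)
 * into the kernel and immediately releases it to memblock, so the page
 * allocator ends up owning everything the early boot code no longer
 * needs.
 */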

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
							  PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		set_pmd(pmd, __pmd(_KERNPG_TABLE +
				   (unsigned long) __pa(pte)));
		if (pte != pte_offset_kernel(pmd, 0))
			BUG();
	}
}
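
/*
 * Editorial sketch (an assumption based on the usual Linux page-table
 * encoding, not something this file states): a pmd entry holds the
 * physical address of a pte page OR'ed with _KERNPG_TABLE's permission
 * bits, so the set_pmd() above amounts to roughly
 *
 *	pmd_val(*pmd) = __pa(pte) | _PAGE_PRESENT | _PAGE_RW | ...;
 *
 * and the BUG() check walks back down through pte_offset_kernel() to
 * confirm that the entry really resolves to the page just allocated.
 */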

static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
	pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!pmd_table)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
	if (pmd_table != pmd_offset(pud, 0))
		BUG();
#endif
}
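
/*
 * Editorial note (hedged): when CONFIG_3_LEVEL_PGTABLES is unset, UML
 * runs with two-level page tables and the pmd level is folded into the
 * pud, so there is no separate pmd page to allocate and this function
 * compiles down to an empty stub.
 */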

static void __init fixrange_init(unsigned long start, unsigned long end,
				 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = pud_offset(pgd, vaddr);
		if (pud_none(*pud))
			one_md_table_init(pud);
		pmd = pmd_offset(pud, vaddr);
		for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
			one_page_table_init(pmd);
			vaddr += PMD_SIZE;
		}
		j = 0;
	}
}
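
/*
 * Usage sketch (inferred from the callers below, not new behaviour):
 * fixrange_init() only builds the intermediate pud/pmd/pte structure
 * covering [start, end); it installs no translations. paging_init()
 * lets set_fixmap() fill in the ptes later, while fixaddr_user_init()
 * writes them itself, e.g.:
 *
 *	fixrange_init(FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
 */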

static void __init fixaddr_user_init(void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
	long size = FIXADDR_USER_END - FIXADDR_USER_START;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	phys_t p;
	unsigned long v, vaddr = FIXADDR_USER_START;

	if (!size)
		return;

	fixrange_init(FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
	v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
	if (!v)
		panic("%s: Failed to allocate %ld bytes align=%lx\n",
		      __func__, size, PAGE_SIZE);

	memcpy((void *) v, (void *) FIXADDR_USER_START, size);
	p = __pa(v);
	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
		      p += PAGE_SIZE) {
		pgd = swapper_pg_dir + pgd_index(vaddr);
		pud = pud_offset(pgd, vaddr);
		pmd = pmd_offset(pud, vaddr);
		pte = pte_offset_kernel(pmd, vaddr);
		pte_set_val(*pte, p, PAGE_READONLY);
	}
#endif
}
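
/*
 * Editorial note (hedged reading of the loop above): the host's
 * vsyscall area is copied into kernel memory and the copy is mapped
 * read-only at the same virtual addresses, so guest userspace sees a
 * stable snapshot that the host cannot change underneath it.
 */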

void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES], vaddr;
	int i;

	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
							       PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	for (i = 0; i < ARRAY_SIZE(zones_size); i++)
		zones_size[i] = 0;

	zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
		(uml_physmem >> PAGE_SHIFT);
	free_area_init(zones_size);

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

	fixaddr_user_init();
}
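
/*
 * Editorial sketch of the zone arithmetic above (same result, spelled
 * differently, assuming both boundaries are page-aligned): all memory
 * between uml_physmem and end_iomem lands in ZONE_NORMAL, counted in
 * page frames:
 *
 *	zones_size[ZONE_NORMAL] = (end_iomem - uml_physmem) >> PAGE_SHIFT;
 *
 * UML has no device-addressing constraints, so every other zone stays
 * empty.
 */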

/*
 * This is a no-op: the kernel image doesn't live in kernel physical
 * memory, so none of it can be freed back to the page allocator.
 */

void free_initmem(void)
{
}

/* Allocate and free page tables. */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

	if (pgd) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD,
		       swapper_pg_dir + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}
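
/*
 * Layout sketch (an assumption about the intent of the memcpy above):
 * the low slots of a fresh pgd start out zeroed for the new process,
 * while the top slots are cloned from swapper_pg_dir so that every
 * address space shares one set of kernel mappings:
 *
 *	index [0, USER_PTRS_PER_PGD)            - zeroed user area
 *	index [USER_PTRS_PER_PGD, PTRS_PER_PGD) - copied kernel area
 */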

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long) pgd);
}

pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte;

	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	return pte;
}

pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	struct page *pte;

	pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}
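
/*
 * Editorial note (hedged): the two allocators differ in that
 * pte_alloc_one() returns a struct page and runs pgtable_page_ctor(),
 * which prepares the page for the split page-table lock and updates
 * the page-table accounting, while pte_alloc_one_kernel() returns a
 * bare zeroed page; kernel mappings need neither the lock nor the
 * accounting.
 */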

#ifdef CONFIG_3_LEVEL_PGTABLES
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);

	if (pmd)
		memset(pmd, 0, PAGE_SIZE);

	return pmd;
}
#endif

void *uml_kmalloc(int size, int flags)
{
	return kmalloc(size, flags);
}
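
/*
 * Usage sketch (assumptions: the UM_GFP_KERNEL flag and the calling
 * convention described here come from um_malloc.h, not from this
 * file): UML's host-side code under arch/um/os-Linux is built without
 * kernel headers and so cannot call kmalloc() directly; it calls this
 * wrapper instead, typically only once mem_init() has set kmalloc_ok:
 *
 *	if (kmalloc_ok)
 *		buf = uml_kmalloc(size, UM_GFP_KERNEL);
 */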
249