// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <as-layout.h>
#include <init.h>
#include <kern.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>

/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);

/*
 * Initialized during boot, and read-only for initializing page tables
 * afterwards
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and read-only after that */
unsigned long long highmem;
EXPORT_SYMBOL(highmem);
int kmalloc_ok = 0;

/* Used during early boot */
static unsigned long brk_end;

void __init mem_init(void)
{
	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Map in the area just after the brk now that kmalloc is about
	 * to be turned on.
	 */
	brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
	memblock_free(__pa(brk_end), uml_reserved - brk_end);
	uml_reserved = brk_end;

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	max_low_pfn = totalram_pages();
	max_pfn = max_low_pfn;
	mem_init_print_info(NULL);
	kmalloc_ok = 1;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
							  PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		set_pmd(pmd, __pmd(_KERNPG_TABLE +
				   (unsigned long) __pa(pte)));
		if (pte != pte_offset_kernel(pmd, 0))
			BUG();
	}
}

/* Allocate a pmd page for three-level page tables; a no-op otherwise. */
static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
	pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);

	if (!pmd_table)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
	if (pmd_table != pmd_offset(pud, 0))
		BUG();
#endif
}

/*
 * Make sure every pmd entry covering [start, end) has a page table
 * allocated; the individual ptes are filled in later by the callers.
 */
static void __init fixrange_init(unsigned long start, unsigned long end,
				 pgd_t *pgd_base)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		p4d = p4d_offset(pgd, vaddr);
		pud = pud_offset(p4d, vaddr);
		if (pud_none(*pud))
			one_md_table_init(pud);
		pmd = pmd_offset(pud, vaddr);
		for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
			one_page_table_init(pmd);
			vaddr += PMD_SIZE;
		}
		j = 0;
	}
}

/*
 * Copy the host's vsyscall area into a kernel allocation and map that
 * copy read-only at the same fixed user addresses.
 */
static void __init fixaddr_user_init(void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
	long size = FIXADDR_USER_END - FIXADDR_USER_START;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	phys_t p;
	unsigned long v, vaddr = FIXADDR_USER_START;

	if (!size)
		return;

	fixrange_init(FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
	v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
	if (!v)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, size, PAGE_SIZE);

	memcpy((void *) v, (void *) FIXADDR_USER_START, size);
	p = __pa(v);
	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
			  p += PAGE_SIZE) {
		pgd = swapper_pg_dir + pgd_index(vaddr);
		p4d = p4d_offset(pgd, vaddr);
		pud = pud_offset(p4d, vaddr);
		pmd = pmd_offset(pud, vaddr);
		pte = pte_offset_kernel(pmd, vaddr);
		pte_set_val(*pte, p, PAGE_READONLY);
	}
#endif
}

void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES], vaddr;
	int i;

	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
							       PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	for (i = 0; i < ARRAY_SIZE(zones_size); i++)
		zones_size[i] = 0;

	zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
		(uml_physmem >> PAGE_SHIFT);
	free_area_init(zones_size);

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

	fixaddr_user_init();
}

/*
 * This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */
void free_initmem(void)
{
}

/* Allocate and free page tables. */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

	if (pgd) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD,
		       swapper_pg_dir + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long) pgd);
}

#ifdef CONFIG_3_LEVEL_PGTABLES
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);

	if (pmd)
		memset(pmd, 0, PAGE_SIZE);

	return pmd;
}
#endif

void *uml_kmalloc(int size, int flags)
{
	return kmalloc(size, flags);
}