/*
 * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/dma-map-ops.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h> /* mem_init */
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/export.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Used for both MMU and noMMU because of the generic PCI code */
int mem_init_done;

char *klimit = _end;

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_size;
EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;

EXPORT_SYMBOL(min_low_pfn);
EXPORT_SYMBOL(max_low_pfn);

#ifdef CONFIG_HIGHMEM
static void __init highmem_init(void)
{
	pr_debug("%x\n", (u32)PKMAP_BASE);
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
}

static void __meminit highmem_setup(void)
{
	unsigned long pfn;

	for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
		struct page *page = pfn_to_page(pfn);

		/* FIXME not sure about */
		if (!memblock_is_reserved(pfn << PAGE_SHIFT))
			free_highmem_page(page);
	}
}
#endif /* CONFIG_HIGHMEM */
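/*
 * Worked example with illustrative values (the RAM base here is
 * hypothetical; 0x30000000 is the default CONFIG_LOWMEM_SIZE): with
 * RAM at physical 0x80000000, 768MB of lowmem and 4kB pages, the
 * calculation in setup_memory() below gives
 *
 *   max_low_pfn = (0x80000000 + 0x30000000) >> 12 = 0xb0000
 *
 * so highmem_setup() hands every page from pfn 0xb0000 up to max_pfn
 * to the highmem zone, unless memblock has reserved it.
 */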
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
static void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	int idx;

	/* Setup fixmaps */
	for (idx = 0; idx < __end_of_fixed_addresses; idx++)
		clear_fixmap(idx);

	/* Clear all zone sizes */
	memset(zones_size, 0, sizeof(zones_size));

#ifdef CONFIG_HIGHMEM
	highmem_init();

	zones_size[ZONE_DMA] = max_low_pfn;
	zones_size[ZONE_HIGHMEM] = max_pfn;
#else
	zones_size[ZONE_DMA] = max_pfn;
#endif

	/* We don't have holes in the memory map */
	free_area_init(zones_size);
}

void __init setup_memory(void)
{
	/*
	 * Kernel:
	 * start: base phys address of kernel - page align
	 * end: end phys address of kernel - page align
	 *
	 * min_low_pfn - the first page (mm/bootmem.c - node_boot_start)
	 * max_low_pfn
	 * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn)
	 */

	/* memory start is from the kernel end (aligned) to higher addr */
	min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
	/* RAM is assumed contiguous */
	max_mapnr = memory_size >> PAGE_SHIFT;
	max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
	max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;

	pr_info("%s: max_mapnr: %#lx\n", __func__, max_mapnr);
	pr_info("%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);

	paging_init();
}

void __init mem_init(void)
{
	high_memory = (void *)__va(memory_start + lowmem_size - 1);

	/* this will put all memory onto the freelists */
	memblock_free_all();
#ifdef CONFIG_HIGHMEM
	highmem_setup();
#endif

	mem_init_done = 1;
}

int page_is_ram(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

/*
 * Check for command-line options that affect what mmu_init() will do.
 */
static void mm_cmdline_setup(void)
{
	unsigned long maxmem = 0;
	char *p = cmd_line;

	/* Look for the mem= option on the command line */
	p = strstr(cmd_line, "mem=");
	if (p) {
		p += 4;
		maxmem = memparse(p, &p);
		if (maxmem && memory_size > maxmem) {
			memory_size = maxmem;
			memblock.memory.regions[0].size = memory_size;
		}
	}
}

/*
 * mmu_init_hw() does the chip-specific initialization of the MMU hardware.
 */
static void __init mmu_init_hw(void)
{
	/*
	 * The Zone Protection Register (ZPR) defines how protection will
	 * be applied to every page which is a member of a given zone. At
	 * present, we utilize only two of the zones.
	 * The zone index bits (of ZSEL) in the PTE are used for software
	 * indicators, except the LSB. For user access, zone 1 is used,
	 * for kernel access, zone 0 is used. We set all but zone 1
	 * to zero, allowing only kernel access as indicated in the PTE.
	 * For zone 1, we set a 01 binary (a value of 10 will not work)
	 * to allow user access as indicated in the PTE. This also allows
	 * kernel access as indicated in the PTE.
	 */
	__asm__ __volatile__ ("ori r11, r0, 0x10000000;"
			      "mts rzpr, r11;"
			      : : : "r11");
}
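/*
 * Decoding the ZPR value above (a sketch based on the comment in
 * mmu_init_hw(), assuming the 405-style layout with zone 0 in the two
 * most significant bits and sixteen 2-bit zone fields):
 *
 *   0x10000000 = 0b 0001 0000 ... 0000
 *
 *   zone 0:      bits 31..30 = 00 -> kernel-only, PTE permissions apply
 *   zone 1:      bits 29..28 = 01 -> user and kernel, PTE permissions apply
 *   zones 2..15:             = 00 -> kernel-only (unused here)
 */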
/*
 * mmu_init() sets up the basic memory mappings for the kernel,
 * including both RAM and possibly some I/O regions,
 * and sets up the page tables and the MMU hardware ready to go.
 */

/* called from head.S */
asmlinkage void __init mmu_init(void)
{
	unsigned int kstart, ksize;

	if (!memblock.reserved.cnt) {
		pr_emerg("Error: no reserved memory regions\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < 0x400000) {
		pr_emerg("Memory must be at least 4MB\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < kernel_tlb) {
		pr_emerg("Kernel size is greater than memory node\n");
		machine_restart(NULL);
	}

	/* Find main memory where the kernel is */
	memory_start = (u32) memblock.memory.regions[0].base;
	lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;

	if (lowmem_size > CONFIG_LOWMEM_SIZE) {
		lowmem_size = CONFIG_LOWMEM_SIZE;
#ifndef CONFIG_HIGHMEM
		memory_size = lowmem_size;
#endif
	}

	mm_cmdline_setup(); /* FIXME parse args from command line - not used */

	/*
	 * Map out the kernel text/data/bss from the available physical
	 * memory.
	 */
	kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
	/* kernel size */
	ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
	memblock_reserve(kstart, ksize);

#if defined(CONFIG_BLK_DEV_INITRD)
	/* Remove the init RAM disk from the available memory. */
	if (initrd_start) {
		unsigned long size;

		size = initrd_end - initrd_start;
		memblock_reserve(__virt_to_phys(initrd_start), size);
	}
#endif /* CONFIG_BLK_DEV_INITRD */

	/* Initialize the MMU hardware */
	mmu_init_hw();

	/* Map in all of RAM starting at CONFIG_KERNEL_START */
	mapin_ram();

	/* Extend the vmalloc and ioremap areas as much as possible */
#ifdef CONFIG_HIGHMEM
	ioremap_base = ioremap_bot = PKMAP_BASE;
#else
	ioremap_base = ioremap_bot = FIXADDR_START;
#endif

	/* Initialize the context management stuff */
	mmu_context_init();

	/* Shortly after that, the entire linear mapping will be available */
	/*
	 * This also causes the unflattened device tree to be allocated
	 * within the lowmem (768MB) limit.
	 */
	memblock_set_current_limit(memory_start + lowmem_size - 1);

	parse_early_param();

	early_init_fdt_scan_reserved_mem();

	/* CMA initialization */
	dma_contiguous_reserve(memory_start + lowmem_size - 1);

	memblock_dump_all();
}

void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
	void *p;

	if (mem_init_done) {
		p = kzalloc(size, mask);
	} else {
		/* Mem setup is not complete yet; fall back to memblock */
		p = memblock_alloc(size, SMP_CACHE_BYTES);
		if (!p)
			panic("%s: Failed to allocate %zu bytes\n",
			      __func__, size);
	}

	return p;
}

static const pgprot_t protection_map[16] = {
	[VM_NONE] = PAGE_NONE,
	[VM_READ] = PAGE_READONLY_X,
	[VM_WRITE] = PAGE_COPY,
	[VM_WRITE | VM_READ] = PAGE_COPY_X,
	[VM_EXEC] = PAGE_READONLY,
	[VM_EXEC | VM_READ] = PAGE_READONLY_X,
	[VM_EXEC | VM_WRITE] = PAGE_COPY,
	[VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
	[VM_SHARED] = PAGE_NONE,
	[VM_SHARED | VM_READ] = PAGE_READONLY_X,
	[VM_SHARED | VM_WRITE] = PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_X,
	[VM_SHARED | VM_EXEC] = PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
	[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
};
DECLARE_VM_GET_PAGE_PROT
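/*
 * Usage note (a sketch, assuming the generic DECLARE_VM_GET_PAGE_PROT
 * expansion): the macro above defines vm_get_page_prot(), which indexes
 * protection_map[] with the VM_READ, VM_WRITE, VM_EXEC and VM_SHARED
 * bits of a VMA's flags. For example, with the table above:
 *
 *   vm_get_page_prot(VM_READ | VM_WRITE)             -> PAGE_COPY_X
 *   vm_get_page_prot(VM_SHARED | VM_READ | VM_WRITE) -> PAGE_SHARED_X
 */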