/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif

/* max amount of RAM to use */
unsigned long __max_memory;

/*
 * Free the pages backing the kernel's init sections: poison them first so
 * any stale reference is caught, then hand them back to the page allocator.
 */
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %luk freed\n",
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

#ifdef CONFIG_PROC_KCORE
static struct kcore_list kcore_vmem;

static int __init setup_kcore(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base, size;
		struct kcore_list *kcore_mem;

		base = lmb.memory.region[i].base;
		size = lmb.memory.region[i].size;

		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
		if (!kcore_mem)
			panic("%s: kmalloc failed\n", __FUNCTION__);

		kclist_add(kcore_mem, __va(base), size);
	}

	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END - VMALLOC_START);

	return 0;
}
module_init(setup_kcore);
#endif

/* Constructor for the page table caches: new objects start out zeroed. */
static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
{
	memset(addr, 0, kmem_cache_size(cache));
}

static const unsigned int pgtable_cache_size[2] = {
	PGD_TABLE_SIZE, PMD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
#ifdef CONFIG_PPC_64K_PAGES
	"pgd_cache", "pmd_cache",
#else
	"pgd_cache", "pud_pmd_cache",
#endif /* CONFIG_PPC_64K_PAGES */
};

#ifdef CONFIG_HUGETLB_PAGE
/* Hugepages need one extra cache, initialized in hugetlbpage.c.  We
 * can't put it into the tables above, because HPAGE_SHIFT is not a
 * compile-time constant. */
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
#else
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
#endif

void pgtable_cache_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
		int size = pgtable_cache_size[i];
		const char *name = pgtable_cache_name[i];

		DBG("Allocating page table cache %s (#%d) "
		    "for size: %08x...\n", name, i, size);
		pgtable_cache[i] = kmem_cache_create(name,
						     size, size,
						     SLAB_PANIC,
						     zero_ctor);
	}
}
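/*
 * Illustrative sketch, not part of the original file: roughly how a
 * consumer such as the pgd_alloc()/pgd_free() helpers in asm/pgalloc.h
 * would draw from the caches set up above.  The helper names here are
 * hypothetical; only kmem_cache_alloc()/kmem_cache_free() are real.
 */
static inline pgd_t *example_pgd_alloc(void)
{
	/* Index 0 is the PGD cache; objects arrive pre-zeroed by zero_ctor() */
	return kmem_cache_alloc(pgtable_cache[0], GFP_KERNEL);
}

static inline void example_pgd_free(pgd_t *pgd)
{
	kmem_cache_free(pgtable_cache[0], pgd);
}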
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
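/*
 * Worked example (the constants here are assumptions for illustration,
 * not taken from this file): with PAGES_PER_SECTION == 0x1000, the
 * struct page for pfn 0x1234 lives at vmemmap + 0x1234, and
 *
 *	vmemmap_section_start((unsigned long)(vmemmap + 0x1234)) == 0x1000
 *
 * i.e. the pfn of the first page of the section containing pfn 0x1234.
 */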
/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(vmemmap_section_start(start)))
			return 1;

	return 0;
}

int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long nr_pages, int node)
{
	unsigned long mode_rw;
	unsigned long start = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;

	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	for (; start < end; start += page_size) {
		int mapped;
		void *p;

		/* Skip slices of the memmap that are already backed */
		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		printk(KERN_WARNING "vmemmap %08lx allocated at %p, "
		       "physical %08lx.\n", start, p, __pa(p));

		/* Bolt the backing block into the hash page table */
		mapped = htab_bolt_mapping(start, start + page_size,
					   __pa(p), mode_rw, mmu_linear_psize);
		BUG_ON(mapped < 0);
	}

	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
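#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Illustrative sketch, not part of the original file: roughly how the
 * generic sparse-vmemmap code is expected to drive vmemmap_populate()
 * above, one memory section at a time.  The helper name is hypothetical.
 */
static inline struct page *example_populate_section(unsigned long pnum, int nid)
{
	struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);

	/* Back this section's slice of the virtual memmap with real pages */
	if (vmemmap_populate(map, PAGES_PER_SECTION, nid))
		return NULL;		/* allocation failed (-ENOMEM) */

	return map;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */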