/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

phys_addr_t memstart_addr = ~0;
phys_addr_t kernstart_addr;

void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)__init_begin;
        for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                totalram_pages++;
        }
        printk("Freeing unused kernel memory: %luk freed\n",
               ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk("Freeing initrd memory: %luk freed\n",
                       (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                init_page_count(virt_to_page(start));
                free_page(start);
                totalram_pages++;
        }
}
#endif

#ifdef CONFIG_PROC_KCORE
static struct kcore_list kcore_vmem;

static int __init setup_kcore(void)
{
        int i;

        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long base, size;
                struct kcore_list *kcore_mem;

                base = lmb.memory.region[i].base;
                size = lmb.memory.region[i].size;

                /* GFP_ATOMIC to avoid might_sleep warnings during boot */
                kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
                if (!kcore_mem)
                        panic("%s: kmalloc failed\n", __func__);

                kclist_add(kcore_mem, __va(base), size);
        }

        kclist_add(&kcore_vmem, (void *)VMALLOC_START,
                   VMALLOC_END - VMALLOC_START);

        return 0;
}
module_init(setup_kcore);
#endif
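/*
 * Page-table caches. The constructors below zero each object when the
 * slab first instantiates it; slab constructors are not re-run on every
 * allocation, so callers are expected to hand tables back to these
 * caches already cleared out.
 */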
static void pgd_ctor(void *addr)
{
        memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
        memset(addr, 0, PMD_TABLE_SIZE);
}

static const unsigned int pgtable_cache_size[2] = {
        PGD_TABLE_SIZE, PMD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
#ifdef CONFIG_PPC_64K_PAGES
        "pgd_cache", "pmd_cache",
#else
        "pgd_cache", "pud_pmd_cache",
#endif /* CONFIG_PPC_64K_PAGES */
};

#ifdef CONFIG_HUGETLB_PAGE
/* Hugepages need an extra cache per hugepage size, initialized in
 * hugetlbpage.c. We can't put them into the tables above because
 * HPAGE_SHIFT is not a compile-time constant. */
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size) + MMU_PAGE_COUNT];
#else
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
#endif

void pgtable_cache_init(void)
{
        pgtable_cache[0] = kmem_cache_create(pgtable_cache_name[0],
                                             PGD_TABLE_SIZE, PGD_TABLE_SIZE,
                                             SLAB_PANIC, pgd_ctor);
        pgtable_cache[1] = kmem_cache_create(pgtable_cache_name[1],
                                             PMD_TABLE_SIZE, PMD_TABLE_SIZE,
                                             SLAB_PANIC, pmd_ctor);
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within. Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
        unsigned long offset = page - ((unsigned long)(vmemmap));

        /* Return the pfn of the start of the section. */
        return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
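/*
 * Worked example (illustrative numbers, not derived from this file):
 * with 4K pages and 16MB sparsemem sections, PAGES_PER_SECTION is 4096.
 * For a vmemmap address describing pfn 5000, the division above recovers
 * 5000 and the mask rounds it down to 4096, the first pfn of the section.
 */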
/*
 * Check if this vmemmap page is already initialised. If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
        unsigned long end = start + page_size;

        for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
                if (pfn_valid(vmemmap_section_start(start)))
                        return 1;

        return 0;
}

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */

#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
                                             unsigned long page_size,
                                             unsigned long phys)
{
        /* Create a PTE encoding without page size */
        unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
                _PAGE_KERNEL_RW;

        /* PTEs only contain page size encodings up to 32M */
        BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

        /* Encode the size in the PTE */
        flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

        /* For each PTE for that area, map things. Note that we don't
         * increment phys because all PTEs are of the large size and
         * thus must have the low bits clear.
         */
        for (i = 0; i < page_size; i += PAGE_SIZE)
                BUG_ON(map_kernel_page(start + i, phys, flags));
}
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
                                             unsigned long page_size,
                                             unsigned long phys)
{
        int mapped = htab_bolt_mapping(start, start + page_size, phys,
                                       PAGE_KERNEL, mmu_vmemmap_psize,
                                       mmu_kernel_ssize);
        BUG_ON(mapped < 0);
}
#endif /* CONFIG_PPC_BOOK3E */

int __meminit vmemmap_populate(struct page *start_page,
                               unsigned long nr_pages, int node)
{
        unsigned long start = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + nr_pages);
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        /* Align to the page size of the linear mapping. */
        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_populate page %p, %lu pages, node %d\n",
                 start_page, nr_pages, node);
        pr_debug(" -> map %lx..%lx\n", start, end);

        for (; start < end; start += page_size) {
                void *p;

                if (vmemmap_populated(start, page_size))
                        continue;

                p = vmemmap_alloc_block(page_size, node);
                if (!p)
                        return -ENOMEM;

                pr_debug(" * %016lx..%016lx allocated at %p\n",
                         start, start + page_size, p);

                vmemmap_create_mapping(start, page_size, __pa(p));
        }

        return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
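/*
 * For orientation (a hedged summary of the generic sparsemem code, not of
 * anything defined in this file): sparse_mem_map_populate() in
 * mm/sparse-vmemmap.c is the usual caller of vmemmap_populate(), passing
 * a section-aligned start_page and nr_pages == PAGES_PER_SECTION, both at
 * boot and on memory hotplug.
 */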