/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

static void pgd_ctor(void *addr)
{
        memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
        memset(addr, 0, PMD_TABLE_SIZE);
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];

/*
 * Create a kmem_cache for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
        char *name;
        unsigned long table_size = sizeof(void *) << shift;
        unsigned long align = table_size;

        /* When batching pgtable pointers for RCU freeing, we store
         * the index size in the low bits.  Table alignment must be
         * big enough to fit it.
         *
         * Likewise, hugepage pagetable pointers contain a (different)
         * shift value in the low bits.  All tables must be aligned so
         * as to leave enough 0 bits in the address to contain it. */
        unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
                                     HUGEPD_SHIFT_MASK + 1);
        struct kmem_cache *new;

        /* It would be nice if this was a BUILD_BUG_ON(), but at the
         * moment, gcc doesn't seem to recognize is_power_of_2 as a
         * constant expression, so much for that. */
        BUG_ON(!is_power_of_2(minalign));
        BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

        if (PGT_CACHE(shift))
                return; /* Already have a cache of this size */

        align = max_t(unsigned long, align, minalign);
        name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
        new = kmem_cache_create(name, table_size, align, 0, ctor);
        PGT_CACHE(shift) = new;

        pr_debug("Allocated pgtable cache for order %d\n", shift);
}

void pgtable_cache_init(void)
{
        pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
        pgtable_cache_add(PMD_INDEX_SIZE, pmd_ctor);
        if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_INDEX_SIZE))
                panic("Couldn't allocate pgtable caches");

        /* In all current configs, when the PUD index exists it's the
         * same size as either the pgd or pmd index.  Verify that the
         * initialization above has also created a PUD cache.  This
         * will need re-examination if we add new possibilities for
         * the pagetable layout. */
        BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
}
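
/*
 * Editorial sketch (not part of the original file): callers such as the
 * pgd/pmd allocation helpers are expected to draw tables from these caches
 * through the standard slab interface, roughly:
 *
 *      pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
 *      ...
 *      kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
 *
 * The constructors registered above zero each table when the slab first
 * constructs it, so new tables start out with no valid entries.
 */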

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
        unsigned long offset = page - ((unsigned long)(vmemmap));

        /* Return the pfn of the start of the section. */
        return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
        unsigned long end = start + page_size;

        for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
                if (pfn_valid(vmemmap_section_start(start)))
                        return 1;

        return 0;
}

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */

#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
                                             unsigned long page_size,
                                             unsigned long phys)
{
        /* Create a PTE encoding without page size */
        unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
                _PAGE_KERNEL_RW;

        /* PTEs only contain page size encodings up to 32M */
        BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

        /* Encode the size in the PTE */
        flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

        /* For each PTE for that area, map things.  Note that we don't
         * increment phys because all PTEs are of the large size and
         * thus must have the low bits clear.
         */
        for (i = 0; i < page_size; i += PAGE_SIZE)
                BUG_ON(map_kernel_page(start + i, phys, flags));
}
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
                                             unsigned long page_size,
                                             unsigned long phys)
{
        int mapped = htab_bolt_mapping(start, start + page_size, phys,
                                       PAGE_KERNEL, mmu_vmemmap_psize,
                                       mmu_kernel_ssize);
        BUG_ON(mapped < 0);
}
#endif /* CONFIG_PPC_BOOK3E */

struct vmemmap_backing *vmemmap_list;

static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
        static struct vmemmap_backing *next;
        static int num_left;

        /* allocate a page when required and hand out chunks */
        if (!next || !num_left) {
                next = vmemmap_alloc_block(PAGE_SIZE, node);
                if (unlikely(!next)) {
                        WARN_ON(1);
                        return NULL;
                }
                num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
        }

        num_left--;

        return next++;
}

static __meminit void vmemmap_list_populate(unsigned long phys,
                                            unsigned long start,
                                            int node)
{
        struct vmemmap_backing *vmem_back;

        vmem_back = vmemmap_list_alloc(node);
        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return;
        }

        vmem_back->phys = phys;
        vmem_back->virt_addr = start;
        vmem_back->list = vmemmap_list;

        vmemmap_list = vmem_back;
}
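
/*
 * Editorial sketch of how the backing list above can be consumed: each
 * vmemmap_backing entry records the virtual start and physical backing of
 * one mapped chunk, chained through ->list.  A (hypothetical) lookup of the
 * physical address backing a given vmemmap address could walk it roughly as:
 *
 *      struct vmemmap_backing *vb;
 *
 *      for (vb = vmemmap_list; vb; vb = vb->list)
 *              if (vb->virt_addr == _ALIGN_DOWN(addr, page_size))
 *                      return vb->phys;
 *
 * The list is only ever prepended to, so the most recently added chunks are
 * found first.
 */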

int __meminit vmemmap_populate(struct page *start_page,
                               unsigned long nr_pages, int node)
{
        unsigned long start = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + nr_pages);
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        /* Align to the page size of the linear mapping. */
        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_populate page %p, %ld pages, node %d\n",
                 start_page, nr_pages, node);
        pr_debug(" -> map %lx..%lx\n", start, end);

        for (; start < end; start += page_size) {
                void *p;

                if (vmemmap_populated(start, page_size))
                        continue;

                p = vmemmap_alloc_block(page_size, node);
                if (!p)
                        return -ENOMEM;

                vmemmap_list_populate(__pa(p), start, node);

                pr_debug(" * %016lx..%016lx allocated at %p\n",
                         start, start + page_size, p);

                vmemmap_create_mapping(start, page_size, __pa(p));
        }

        return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
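
/*
 * Editorial note: with CONFIG_SPARSEMEM_VMEMMAP, vmemmap_populate() is not
 * called from this file; the generic sparsemem code (mm/sparse-vmemmap.c)
 * invokes it once per memory section, roughly along the lines of:
 *
 *      struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
 *
 *      if (vmemmap_populate(map, PAGES_PER_SECTION, nid))
 *              return NULL;
 *
 * so each call covers one section's worth of struct pages, which the loop
 * in vmemmap_populate() then rounds to the linear-mapping page size before
 * backing and mapping it.
 */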