/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *	Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

phys_addr_t memstart_addr = ~0;
phys_addr_t kernstart_addr;

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		/* Poison each init page, then hand it back to the
		 * page allocator. */
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %luk freed\n",
		((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %ldk freed\n",
			(end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

static void pgd_ctor(void *addr)
{
	memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, PMD_TABLE_SIZE);
}

static const unsigned int pgtable_cache_size[2] = {
	PGD_TABLE_SIZE, PMD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
#ifdef CONFIG_PPC_64K_PAGES
	"pgd_cache", "pmd_cache",
#else
	"pgd_cache", "pud_pmd_cache",
#endif /* CONFIG_PPC_64K_PAGES */
};

#ifdef CONFIG_HUGETLB_PAGE
/* Hugepages need an extra cache per hugepagesize, initialized in
 * hugetlbpage.c.  We can't put it into the tables above, because
 * HPAGE_SHIFT is not a compile time constant. */
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+MMU_PAGE_COUNT];
#else
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
#endif

void pgtable_cache_init(void)
{
	pgtable_cache[0] = kmem_cache_create(pgtable_cache_name[0],
					     PGD_TABLE_SIZE, PGD_TABLE_SIZE,
					     SLAB_PANIC, pgd_ctor);
	pgtable_cache[1] = kmem_cache_create(pgtable_cache_name[1],
					     PMD_TABLE_SIZE, PMD_TABLE_SIZE,
					     SLAB_PANIC, pmd_ctor);
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(vmemmap_section_start(start)))
			return 1;

	return 0;
}
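
/*
 * Illustrative sketch, deliberately compiled out: a hypothetical debug
 * walker showing how vmemmap_populated() is meant to be driven.  It
 * steps through a vmemmap range in mapping-sized, mapping-aligned
 * chunks, exactly as vmemmap_populate() below does.  The name
 * vmemmap_dump_coverage is made up for this example.  For scale, with
 * 64-byte struct pages and 4K pages in 16M sections (hypothetical
 * numbers), one section's worth of struct pages occupies 256K of
 * vmemmap space.
 */
#if 0
static void __meminit vmemmap_dump_coverage(unsigned long start,
					    unsigned long end)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Walk in mapping-sized, mapping-aligned steps. */
	for (start = _ALIGN_DOWN(start, page_size); start < end;
	     start += page_size)
		pr_debug("vmemmap %016lx..%016lx: %s\n",
			 start, start + page_size,
			 vmemmap_populated(start, page_size) ?
				"already populated" : "needs backing");
}
#endif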

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */

#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* For each PTE for that area, map things.  Note that we don't
	 * increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear.
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, flags));
}
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	int mapped = htab_bolt_mapping(start, start + page_size, phys,
				       PAGE_KERNEL, mmu_vmemmap_psize,
				       mmu_kernel_ssize);
	BUG_ON(mapped < 0);
}
#endif /* CONFIG_PPC_BOOK3E */

int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long nr_pages, int node)
{
	unsigned long start = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate page %p, %ld pages, node %d\n",
		 start_page, nr_pages, node);
	pr_debug(" -> map %lx..%lx\n", start, end);

	for (; start < end; start += page_size) {
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		pr_debug(" * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		vmemmap_create_mapping(start, page_size, __pa(p));
	}

	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
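
/*
 * Usage sketch, compiled out: vmemmap_populate() is normally invoked by
 * the generic sparsemem code (sparse_mem_map_populate() in
 * mm/sparse-vmemmap.c) one section at a time.  The helper below is a
 * hypothetical stand-in for that caller, shown only to make the calling
 * convention concrete.  For scale, with a hypothetical 16M vmemmap page
 * size (shift == 24), each loop iteration in vmemmap_populate() backs
 * 16M of vmemmap with a single allocation: one bolted hash mapping, or
 * 4096 Book3E PTEs all carrying the 16M size encoding.
 */
#if 0
static struct page * __meminit example_populate_section(unsigned long pnum,
							int nid)
{
	struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);

	/* Back the struct pages for one memory section. */
	if (vmemmap_populate(map, PAGES_PER_SECTION, nid))
		return NULL;
	return map;
}
#endif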