/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *	Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

static void pgd_ctor(void *addr)
{
	memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	memset(addr, 0, PMD_TABLE_SIZE * 2);
#else
	memset(addr, 0, PMD_TABLE_SIZE);
#endif
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];

/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
	char *name;
	unsigned long table_size = sizeof(void *) << shift;
	unsigned long align = table_size;

	/* When batching pgtable pointers for RCU freeing, we store
	 * the index size in the low bits.  Table alignment must be
	 * big enough to fit it.
	 *
	 * Likewise, hugepage pagetable pointers contain a (different)
	 * shift value in the low bits.  All tables must be aligned so
	 * as to leave enough 0 bits in the address to contain it. */
	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
				     HUGEPD_SHIFT_MASK + 1);
	struct kmem_cache *new;

	/* It would be nice if this was a BUILD_BUG_ON(), but at the
	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
	 * constant expression, so much for that. */
	BUG_ON(!is_power_of_2(minalign));
	BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

	if (PGT_CACHE(shift))
		return; /* Already have a cache of this size */

	align = max_t(unsigned long, align, minalign);
	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
	new = kmem_cache_create(name, table_size, align, 0, ctor);
	pgtable_cache[shift - 1] = new;
	pr_debug("Allocated pgtable cache for order %d\n", shift);
}

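/*
 * A minimal userspace sketch (guarded out, not built) of the low-bit
 * tagging that the minalign computation above makes possible: because
 * every table is aligned to at least MAX_PGTABLE_INDEX_SIZE + 1 bytes,
 * an index size fits in the pointer's zero low bits.  The constants and
 * helper names below are illustrative, not kernel API.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define TAG_BITS	4	/* a 16-byte minalign frees 4 low bits */
#define TAG_MASK	((1UL << TAG_BITS) - 1)

/* Pack an index size into the low bits of a suitably aligned pointer. */
static uintptr_t table_pack(void *table, unsigned int index_size)
{
	uintptr_t p = (uintptr_t)table;

	assert((p & TAG_MASK) == 0);	/* alignment guarantees these bits */
	assert(index_size <= TAG_MASK);
	return p | index_size;
}

static void *table_ptr(uintptr_t packed)
{
	return (void *)(packed & ~TAG_MASK);
}

static unsigned int table_index_size(uintptr_t packed)
{
	return packed & TAG_MASK;
}

int main(void)
{
	void *table;
	uintptr_t packed;

	/* posix_memalign stands in for the aligned kmem_cache */
	if (posix_memalign(&table, 1UL << TAG_BITS, 512))
		return 1;

	packed = table_pack(table, 9);
	assert(table_ptr(packed) == table);
	assert(table_index_size(packed) == 9);
	free(table);
	return 0;
}
#endif
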
void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
	pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
	if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
		panic("Couldn't allocate pgtable caches");
	/* In all current configs, when the PUD index exists it's the
	 * same size as either the pgd or pmd index.  Verify that the
	 * initialization above has also created a PUD cache.  This
	 * will need re-examination if we add new possibilities for
	 * the pagetable layout. */
	BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(vmemmap_section_start(start)))
			return 1;

	return 0;
}

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */

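/*
 * A self-contained sketch (guarded out, not built) of the arithmetic in
 * vmemmap_section_start() above: the offset into the vmemmap, divided by
 * the size of a struct page, is a pfn, and masking with PAGE_SECTION_MASK
 * rounds it down to its section boundary.  All toy_* names and constants
 * are illustrative stand-ins for the real sparsemem configuration.
 */
#if 0
#include <assert.h>

#define TOY_SECTION_SIZE_BITS	24	/* 16MB sections */
#define TOY_PAGE_SHIFT		12	/* 4KB pages */
#define TOY_PAGES_PER_SECTION	(1UL << (TOY_SECTION_SIZE_BITS - TOY_PAGE_SHIFT))
#define TOY_PAGE_SECTION_MASK	(~(TOY_PAGES_PER_SECTION - 1))

struct toy_page { unsigned long words[8]; };	/* 64-byte struct page */

static unsigned long toy_vmemmap = 0xf000000000000000UL;

/* pfn of the first page of the section containing this vmemmap address */
static unsigned long toy_section_start(unsigned long addr)
{
	unsigned long offset = addr - toy_vmemmap;

	return (offset / sizeof(struct toy_page)) & TOY_PAGE_SECTION_MASK;
}

int main(void)
{
	/* the struct page for pfn 5000 sits at vmemmap + 5000 * 64 */
	unsigned long addr = toy_vmemmap + 5000 * sizeof(struct toy_page);

	/* 4096 pages per section, so pfn 5000 rounds down to 4096 */
	assert(toy_section_start(addr) == 4096);
	return 0;
}
#endif
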
#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* For each PTE for that area, map things.  Note that we don't
	 * increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear.
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, flags));
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void vmemmap_remove_mapping(unsigned long start,
				   unsigned long page_size)
{
}
#endif
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	int mapped = htab_bolt_mapping(start, start + page_size, phys,
				       pgprot_val(PAGE_KERNEL),
				       mmu_vmemmap_psize,
				       mmu_kernel_ssize);
	BUG_ON(mapped < 0);
}

#ifdef CONFIG_MEMORY_HOTPLUG
extern int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			       int psize, int ssize);

static void vmemmap_remove_mapping(unsigned long start,
				   unsigned long page_size)
{
	int mapped = htab_remove_mapping(start, start + page_size,
					 mmu_vmemmap_psize,
					 mmu_kernel_ssize);
	BUG_ON(mapped < 0);
}
#endif

#endif /* CONFIG_PPC_BOOK3E */

struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
static int num_left;
static int num_freed;

static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;
	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}

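/*
 * The allocator above carves struct vmemmap_backing entries out of whole
 * pages and recycles freed entries through the same 'next' pointer: the
 * tail of the freed chain links back to the unconsumed part of the
 * current page.  A userspace sketch of that pattern (guarded out, not
 * built; toy_* names are illustrative, with malloc standing in for
 * vmemmap_alloc_block):
 */
#if 0
#include <assert.h>
#include <stdlib.h>

#define TOY_PAGE_SIZE	4096

struct toy_backing {
	struct toy_backing *list;
	unsigned long payload;
};

static struct toy_backing *toy_next;
static int toy_num_left;
static int toy_num_freed;

static struct toy_backing *toy_alloc(void)
{
	struct toy_backing *b;

	/* reuse recycled entries first */
	if (toy_num_freed) {
		toy_num_freed--;
		b = toy_next;
		toy_next = toy_next->list;
		return b;
	}

	/* otherwise carve chunks out of a freshly allocated page */
	if (!toy_num_left) {
		toy_next = malloc(TOY_PAGE_SIZE);
		if (!toy_next)
			return NULL;
		toy_num_left = TOY_PAGE_SIZE / sizeof(struct toy_backing);
	}

	toy_num_left--;
	return toy_next++;
}

/* freeing pushes the entry onto the recycle chain headed by toy_next */
static void toy_free(struct toy_backing *b)
{
	b->list = toy_next;
	toy_next = b;
	toy_num_freed++;
}

int main(void)
{
	struct toy_backing *a = toy_alloc();
	struct toy_backing *b = toy_alloc();

	toy_free(a);
	assert(toy_alloc() == a);	/* the freed entry is reused first */
	assert(toy_alloc() == b + 1);	/* then carving resumes where it left off */
	return 0;
}
#endif
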
static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		vmemmap_create_mapping(start, page_size, __pa(p));
	}

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* walk the list, remembering the previous entry */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return 0;
	}

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* make next point at this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

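/*
 * vmemmap_free() below returns each block either page by page (bootmem)
 * or in one go via free_pages(..., get_order(page_size)).  A sketch of
 * the get_order() rounding (guarded out, not built; toy_get_order is an
 * illustrative reimplementation, not the kernel's):
 */
#if 0
#include <assert.h>

#define TOY_PAGE_SHIFT	12	/* 4KB pages */

/* order n covers (1 << n) pages: size is rounded up to a power of two */
static int toy_get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> TOY_PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	/* a 16MB vmemmap block is 2^12 4KB pages, hence order 12 */
	assert(toy_get_order(16UL << 20) == 12);
	/* anything up to one page is order 0 */
	assert(toy_get_order(1) == 0 && toy_get_order(4096) == 0);
	/* one byte over a page boundary rounds up to the next order */
	assert(toy_get_order(4097) == 1);
	return 0;
}
#endif
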
void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long addr;

		/*
		 * The section has already been marked as invalid, so if
		 * vmemmap_populated() returns true some other section
		 * still uses this page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (addr) {
			struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

			if (PageReserved(page)) {
				/* allocated from bootmem */
				if (page_size < PAGE_SIZE) {
					/*
					 * This shouldn't happen, but if it
					 * does, leave the memory there.
					 */
					WARN_ON_ONCE(1);
				} else {
					unsigned int nr_pages =
						1 << get_order(page_size);
					while (nr_pages--)
						free_reserved_page(page++);
				}
			} else
				free_pages((unsigned long)(__va(addr)),
					   get_order(page_size));

			vmemmap_remove_mapping(start, page_size);
		}
	}
}
#endif

void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}

/*
 * We do not have access to the sparsemem vmemmap, so we fall back to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump.  In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) As real sparsemem blocks do not lie in RAM contiguously (they
 * are in virtual address space, which is not available in real mode),
 * the requested page struct can be split between blocks, so
 * get_page/put_page may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct vmemmap_backing *vmem_back;
	struct page *page;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
		if (pg_va < vmem_back->virt_addr)
			continue;

		/* vmemmap_list entries can be freed, so check every block */
		if ((pg_va + sizeof(struct page)) <=
		    (vmem_back->virt_addr + page_size)) {
			page = (struct page *) (vmem_back->phys + pg_va -
						vmem_back->virt_addr);
			return page;
		}
	}

	/* The page struct is probably split between real pages */
	return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#elif defined(CONFIG_FLATMEM)

struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);
	return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
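
/*
 * A userspace sketch (guarded out, not built) of the translation done by
 * the sparsemem realmode_pfn_to_page() above: find the vmemmap block
 * covering the struct page's virtual address, then apply that block's
 * virt->phys offset.  toy_* names and the block size are illustrative.
 */
#if 0
#include <assert.h>
#include <stddef.h>

#define TOY_BLOCK_SIZE	(16UL << 20)	/* one 16MB vmemmap block */

struct toy_backing {
	struct toy_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};

/*
 * Return the physical address backing the struct page at pg_va, or 0 if
 * the entry is not fully contained in any block (split across blocks).
 */
static unsigned long toy_realmode_translate(struct toy_backing *list,
					    unsigned long pg_va,
					    size_t entry_size)
{
	struct toy_backing *b;

	for (b = list; b; b = b->list) {
		if (pg_va < b->virt_addr)
			continue;
		if (pg_va + entry_size <= b->virt_addr + TOY_BLOCK_SIZE)
			return b->phys + (pg_va - b->virt_addr);
	}
	return 0;
}

int main(void)
{
	struct toy_backing block = {
		.list = NULL,
		.phys = 0x40000000UL,	/* backing storage at 1GB */
		.virt_addr = 0xf000000000000000UL,
	};
	unsigned long va = block.virt_addr + 0x1234 * 64;	/* 64-byte entries */

	assert(toy_realmode_translate(&block, va, 64) ==
	       block.phys + 0x1234 * 64);
	return 0;
}
#endif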