1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 21da177e4SLinus Torvalds /* 31da177e4SLinus Torvalds * linux/mm/vmalloc.c 41da177e4SLinus Torvalds * 51da177e4SLinus Torvalds * Copyright (C) 1993 Linus Torvalds 61da177e4SLinus Torvalds * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 71da177e4SLinus Torvalds * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000 81da177e4SLinus Torvalds * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002 9930fc45aSChristoph Lameter * Numa awareness, Christoph Lameter, SGI, June 2005 101da177e4SLinus Torvalds */ 111da177e4SLinus Torvalds 12db64fe02SNick Piggin #include <linux/vmalloc.h> 131da177e4SLinus Torvalds #include <linux/mm.h> 141da177e4SLinus Torvalds #include <linux/module.h> 151da177e4SLinus Torvalds #include <linux/highmem.h> 16c3edc401SIngo Molnar #include <linux/sched/signal.h> 171da177e4SLinus Torvalds #include <linux/slab.h> 181da177e4SLinus Torvalds #include <linux/spinlock.h> 191da177e4SLinus Torvalds #include <linux/interrupt.h> 205f6a6a9cSAlexey Dobriyan #include <linux/proc_fs.h> 21a10aa579SChristoph Lameter #include <linux/seq_file.h> 22868b104dSRick Edgecombe #include <linux/set_memory.h> 233ac7fe5aSThomas Gleixner #include <linux/debugobjects.h> 2423016969SChristoph Lameter #include <linux/kallsyms.h> 25db64fe02SNick Piggin #include <linux/list.h> 264da56b99SChris Wilson #include <linux/notifier.h> 27db64fe02SNick Piggin #include <linux/rbtree.h> 28db64fe02SNick Piggin #include <linux/radix-tree.h> 29db64fe02SNick Piggin #include <linux/rcupdate.h> 30f0aa6617STejun Heo #include <linux/pfn.h> 3189219d37SCatalin Marinas #include <linux/kmemleak.h> 3260063497SArun Sharma #include <linux/atomic.h> 333b32123dSGideon Israel Dsouza #include <linux/compiler.h> 3432fcfd40SAl Viro #include <linux/llist.h> 350f616be1SToshi Kani #include <linux/bitops.h> 3668ad4a33SUladzislau Rezki (Sony) #include <linux/rbtree_augmented.h> 373b32123dSGideon Israel Dsouza 387c0f6ba6SLinus Torvalds #include <linux/uaccess.h> 391da177e4SLinus Torvalds #include <asm/tlbflush.h> 402dca6999SDavid Miller #include <asm/shmparam.h> 411da177e4SLinus Torvalds 42dd56b046SMel Gorman #include "internal.h" 43dd56b046SMel Gorman 4432fcfd40SAl Viro struct vfree_deferred { 4532fcfd40SAl Viro struct llist_head list; 4632fcfd40SAl Viro struct work_struct wq; 4732fcfd40SAl Viro }; 4832fcfd40SAl Viro static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred); 4932fcfd40SAl Viro 5032fcfd40SAl Viro static void __vunmap(const void *, int); 5132fcfd40SAl Viro 5232fcfd40SAl Viro static void free_work(struct work_struct *w) 5332fcfd40SAl Viro { 5432fcfd40SAl Viro struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq); 55894e58c1SByungchul Park struct llist_node *t, *llnode; 56894e58c1SByungchul Park 57894e58c1SByungchul Park llist_for_each_safe(llnode, t, llist_del_all(&p->list)) 58894e58c1SByungchul Park __vunmap((void *)llnode, 1); 5932fcfd40SAl Viro } 6032fcfd40SAl Viro 61db64fe02SNick Piggin /*** Page table manipulation functions ***/ 62b221385bSAdrian Bunk 631da177e4SLinus Torvalds static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) 641da177e4SLinus Torvalds { 651da177e4SLinus Torvalds pte_t *pte; 661da177e4SLinus Torvalds 671da177e4SLinus Torvalds pte = pte_offset_kernel(pmd, addr); 681da177e4SLinus Torvalds do { 691da177e4SLinus Torvalds pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); 701da177e4SLinus Torvalds WARN_ON(!pte_none(ptent) && 
!pte_present(ptent)); 711da177e4SLinus Torvalds } while (pte++, addr += PAGE_SIZE, addr != end); 721da177e4SLinus Torvalds } 731da177e4SLinus Torvalds 74db64fe02SNick Piggin static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end) 751da177e4SLinus Torvalds { 761da177e4SLinus Torvalds pmd_t *pmd; 771da177e4SLinus Torvalds unsigned long next; 781da177e4SLinus Torvalds 791da177e4SLinus Torvalds pmd = pmd_offset(pud, addr); 801da177e4SLinus Torvalds do { 811da177e4SLinus Torvalds next = pmd_addr_end(addr, end); 82b9820d8fSToshi Kani if (pmd_clear_huge(pmd)) 83b9820d8fSToshi Kani continue; 841da177e4SLinus Torvalds if (pmd_none_or_clear_bad(pmd)) 851da177e4SLinus Torvalds continue; 861da177e4SLinus Torvalds vunmap_pte_range(pmd, addr, next); 871da177e4SLinus Torvalds } while (pmd++, addr = next, addr != end); 881da177e4SLinus Torvalds } 891da177e4SLinus Torvalds 90c2febafcSKirill A. Shutemov static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end) 911da177e4SLinus Torvalds { 921da177e4SLinus Torvalds pud_t *pud; 931da177e4SLinus Torvalds unsigned long next; 941da177e4SLinus Torvalds 95c2febafcSKirill A. Shutemov pud = pud_offset(p4d, addr); 961da177e4SLinus Torvalds do { 971da177e4SLinus Torvalds next = pud_addr_end(addr, end); 98b9820d8fSToshi Kani if (pud_clear_huge(pud)) 99b9820d8fSToshi Kani continue; 1001da177e4SLinus Torvalds if (pud_none_or_clear_bad(pud)) 1011da177e4SLinus Torvalds continue; 1021da177e4SLinus Torvalds vunmap_pmd_range(pud, addr, next); 1031da177e4SLinus Torvalds } while (pud++, addr = next, addr != end); 1041da177e4SLinus Torvalds } 1051da177e4SLinus Torvalds 106c2febafcSKirill A. Shutemov static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end) 107c2febafcSKirill A. Shutemov { 108c2febafcSKirill A. Shutemov p4d_t *p4d; 109c2febafcSKirill A. Shutemov unsigned long next; 110c2febafcSKirill A. Shutemov 111c2febafcSKirill A. Shutemov p4d = p4d_offset(pgd, addr); 112c2febafcSKirill A. Shutemov do { 113c2febafcSKirill A. Shutemov next = p4d_addr_end(addr, end); 114c2febafcSKirill A. Shutemov if (p4d_clear_huge(p4d)) 115c2febafcSKirill A. Shutemov continue; 116c2febafcSKirill A. Shutemov if (p4d_none_or_clear_bad(p4d)) 117c2febafcSKirill A. Shutemov continue; 118c2febafcSKirill A. Shutemov vunmap_pud_range(p4d, addr, next); 119c2febafcSKirill A. Shutemov } while (p4d++, addr = next, addr != end); 120c2febafcSKirill A. Shutemov } 121c2febafcSKirill A. Shutemov 122db64fe02SNick Piggin static void vunmap_page_range(unsigned long addr, unsigned long end) 1231da177e4SLinus Torvalds { 1241da177e4SLinus Torvalds pgd_t *pgd; 1251da177e4SLinus Torvalds unsigned long next; 1261da177e4SLinus Torvalds 1271da177e4SLinus Torvalds BUG_ON(addr >= end); 1281da177e4SLinus Torvalds pgd = pgd_offset_k(addr); 1291da177e4SLinus Torvalds do { 1301da177e4SLinus Torvalds next = pgd_addr_end(addr, end); 1311da177e4SLinus Torvalds if (pgd_none_or_clear_bad(pgd)) 1321da177e4SLinus Torvalds continue; 133c2febafcSKirill A. 
Shutemov vunmap_p4d_range(pgd, addr, next); 1341da177e4SLinus Torvalds } while (pgd++, addr = next, addr != end); 1351da177e4SLinus Torvalds } 1361da177e4SLinus Torvalds 1371da177e4SLinus Torvalds static int vmap_pte_range(pmd_t *pmd, unsigned long addr, 138db64fe02SNick Piggin unsigned long end, pgprot_t prot, struct page **pages, int *nr) 1391da177e4SLinus Torvalds { 1401da177e4SLinus Torvalds pte_t *pte; 1411da177e4SLinus Torvalds 142db64fe02SNick Piggin /* 143db64fe02SNick Piggin * nr is a running index into the array which helps higher level 144db64fe02SNick Piggin * callers keep track of where we're up to. 145db64fe02SNick Piggin */ 146db64fe02SNick Piggin 147872fec16SHugh Dickins pte = pte_alloc_kernel(pmd, addr); 1481da177e4SLinus Torvalds if (!pte) 1491da177e4SLinus Torvalds return -ENOMEM; 1501da177e4SLinus Torvalds do { 151db64fe02SNick Piggin struct page *page = pages[*nr]; 152db64fe02SNick Piggin 153db64fe02SNick Piggin if (WARN_ON(!pte_none(*pte))) 154db64fe02SNick Piggin return -EBUSY; 155db64fe02SNick Piggin if (WARN_ON(!page)) 1561da177e4SLinus Torvalds return -ENOMEM; 1571da177e4SLinus Torvalds set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); 158db64fe02SNick Piggin (*nr)++; 1591da177e4SLinus Torvalds } while (pte++, addr += PAGE_SIZE, addr != end); 1601da177e4SLinus Torvalds return 0; 1611da177e4SLinus Torvalds } 1621da177e4SLinus Torvalds 163db64fe02SNick Piggin static int vmap_pmd_range(pud_t *pud, unsigned long addr, 164db64fe02SNick Piggin unsigned long end, pgprot_t prot, struct page **pages, int *nr) 1651da177e4SLinus Torvalds { 1661da177e4SLinus Torvalds pmd_t *pmd; 1671da177e4SLinus Torvalds unsigned long next; 1681da177e4SLinus Torvalds 1691da177e4SLinus Torvalds pmd = pmd_alloc(&init_mm, pud, addr); 1701da177e4SLinus Torvalds if (!pmd) 1711da177e4SLinus Torvalds return -ENOMEM; 1721da177e4SLinus Torvalds do { 1731da177e4SLinus Torvalds next = pmd_addr_end(addr, end); 174db64fe02SNick Piggin if (vmap_pte_range(pmd, addr, next, prot, pages, nr)) 1751da177e4SLinus Torvalds return -ENOMEM; 1761da177e4SLinus Torvalds } while (pmd++, addr = next, addr != end); 1771da177e4SLinus Torvalds return 0; 1781da177e4SLinus Torvalds } 1791da177e4SLinus Torvalds 180c2febafcSKirill A. Shutemov static int vmap_pud_range(p4d_t *p4d, unsigned long addr, 181db64fe02SNick Piggin unsigned long end, pgprot_t prot, struct page **pages, int *nr) 1821da177e4SLinus Torvalds { 1831da177e4SLinus Torvalds pud_t *pud; 1841da177e4SLinus Torvalds unsigned long next; 1851da177e4SLinus Torvalds 186c2febafcSKirill A. Shutemov pud = pud_alloc(&init_mm, p4d, addr); 1871da177e4SLinus Torvalds if (!pud) 1881da177e4SLinus Torvalds return -ENOMEM; 1891da177e4SLinus Torvalds do { 1901da177e4SLinus Torvalds next = pud_addr_end(addr, end); 191db64fe02SNick Piggin if (vmap_pmd_range(pud, addr, next, prot, pages, nr)) 1921da177e4SLinus Torvalds return -ENOMEM; 1931da177e4SLinus Torvalds } while (pud++, addr = next, addr != end); 1941da177e4SLinus Torvalds return 0; 1951da177e4SLinus Torvalds } 1961da177e4SLinus Torvalds 197c2febafcSKirill A. Shutemov static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, 198c2febafcSKirill A. Shutemov unsigned long end, pgprot_t prot, struct page **pages, int *nr) 199c2febafcSKirill A. Shutemov { 200c2febafcSKirill A. Shutemov p4d_t *p4d; 201c2febafcSKirill A. Shutemov unsigned long next; 202c2febafcSKirill A. Shutemov 203c2febafcSKirill A. Shutemov p4d = p4d_alloc(&init_mm, pgd, addr); 204c2febafcSKirill A. Shutemov if (!p4d) 205c2febafcSKirill A. 
Shutemov return -ENOMEM; 206c2febafcSKirill A. Shutemov do { 207c2febafcSKirill A. Shutemov next = p4d_addr_end(addr, end); 208c2febafcSKirill A. Shutemov if (vmap_pud_range(p4d, addr, next, prot, pages, nr)) 209c2febafcSKirill A. Shutemov return -ENOMEM; 210c2febafcSKirill A. Shutemov } while (p4d++, addr = next, addr != end); 211c2febafcSKirill A. Shutemov return 0; 212c2febafcSKirill A. Shutemov } 213c2febafcSKirill A. Shutemov 214db64fe02SNick Piggin /* 215db64fe02SNick Piggin * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and 216db64fe02SNick Piggin * will have pfns corresponding to the "pages" array. 217db64fe02SNick Piggin * 218db64fe02SNick Piggin * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N] 219db64fe02SNick Piggin */ 2208fc48985STejun Heo static int vmap_page_range_noflush(unsigned long start, unsigned long end, 221db64fe02SNick Piggin pgprot_t prot, struct page **pages) 2221da177e4SLinus Torvalds { 2231da177e4SLinus Torvalds pgd_t *pgd; 2241da177e4SLinus Torvalds unsigned long next; 2252e4e27c7SAdam Lackorzynski unsigned long addr = start; 226db64fe02SNick Piggin int err = 0; 227db64fe02SNick Piggin int nr = 0; 2281da177e4SLinus Torvalds 2291da177e4SLinus Torvalds BUG_ON(addr >= end); 2301da177e4SLinus Torvalds pgd = pgd_offset_k(addr); 2311da177e4SLinus Torvalds do { 2321da177e4SLinus Torvalds next = pgd_addr_end(addr, end); 233c2febafcSKirill A. Shutemov err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr); 2341da177e4SLinus Torvalds if (err) 235bf88c8c8SFigo.zhang return err; 2361da177e4SLinus Torvalds } while (pgd++, addr = next, addr != end); 237db64fe02SNick Piggin 238db64fe02SNick Piggin return nr; 2391da177e4SLinus Torvalds } 2401da177e4SLinus Torvalds 2418fc48985STejun Heo static int vmap_page_range(unsigned long start, unsigned long end, 2428fc48985STejun Heo pgprot_t prot, struct page **pages) 2438fc48985STejun Heo { 2448fc48985STejun Heo int ret; 2458fc48985STejun Heo 2468fc48985STejun Heo ret = vmap_page_range_noflush(start, end, prot, pages); 2478fc48985STejun Heo flush_cache_vmap(start, end); 2488fc48985STejun Heo return ret; 2498fc48985STejun Heo } 2508fc48985STejun Heo 25181ac3ad9SKAMEZAWA Hiroyuki int is_vmalloc_or_module_addr(const void *x) 25273bdf0a6SLinus Torvalds { 25373bdf0a6SLinus Torvalds /* 254ab4f2ee1SRussell King * ARM, x86-64 and sparc64 put modules in a special place, 25573bdf0a6SLinus Torvalds * and fall back on vmalloc() if that fails. Others 25673bdf0a6SLinus Torvalds * just put it in the vmalloc space. 25773bdf0a6SLinus Torvalds */ 25873bdf0a6SLinus Torvalds #if defined(CONFIG_MODULES) && defined(MODULES_VADDR) 25973bdf0a6SLinus Torvalds unsigned long addr = (unsigned long)x; 26073bdf0a6SLinus Torvalds if (addr >= MODULES_VADDR && addr < MODULES_END) 26173bdf0a6SLinus Torvalds return 1; 26273bdf0a6SLinus Torvalds #endif 26373bdf0a6SLinus Torvalds return is_vmalloc_addr(x); 26473bdf0a6SLinus Torvalds } 26573bdf0a6SLinus Torvalds 26648667e7aSChristoph Lameter /* 267add688fbSmalc * Walk a vmap address to the struct page it maps. 26848667e7aSChristoph Lameter */ 269add688fbSmalc struct page *vmalloc_to_page(const void *vmalloc_addr) 27048667e7aSChristoph Lameter { 27148667e7aSChristoph Lameter unsigned long addr = (unsigned long) vmalloc_addr; 272add688fbSmalc struct page *page = NULL; 27348667e7aSChristoph Lameter pgd_t *pgd = pgd_offset_k(addr); 274c2febafcSKirill A. Shutemov p4d_t *p4d; 275c2febafcSKirill A. Shutemov pud_t *pud; 276c2febafcSKirill A. 
Shutemov pmd_t *pmd; 277c2febafcSKirill A. Shutemov pte_t *ptep, pte; 27848667e7aSChristoph Lameter 2797aa413deSIngo Molnar /* 2807aa413deSIngo Molnar * XXX we might need to change this if we add VIRTUAL_BUG_ON for 2817aa413deSIngo Molnar * architectures that do not vmalloc module space 2827aa413deSIngo Molnar */ 28373bdf0a6SLinus Torvalds VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr)); 28459ea7463SJiri Slaby 285c2febafcSKirill A. Shutemov if (pgd_none(*pgd)) 286c2febafcSKirill A. Shutemov return NULL; 287c2febafcSKirill A. Shutemov p4d = p4d_offset(pgd, addr); 288c2febafcSKirill A. Shutemov if (p4d_none(*p4d)) 289c2febafcSKirill A. Shutemov return NULL; 290c2febafcSKirill A. Shutemov pud = pud_offset(p4d, addr); 291029c54b0SArd Biesheuvel 292029c54b0SArd Biesheuvel /* 293029c54b0SArd Biesheuvel * Don't dereference bad PUD or PMD (below) entries. This will also 294029c54b0SArd Biesheuvel * identify huge mappings, which we may encounter on architectures 295029c54b0SArd Biesheuvel * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be 296029c54b0SArd Biesheuvel * identified as vmalloc addresses by is_vmalloc_addr(), but are 297029c54b0SArd Biesheuvel * not [unambiguously] associated with a struct page, so there is 298029c54b0SArd Biesheuvel * no correct value to return for them. 299029c54b0SArd Biesheuvel */ 300029c54b0SArd Biesheuvel WARN_ON_ONCE(pud_bad(*pud)); 301029c54b0SArd Biesheuvel if (pud_none(*pud) || pud_bad(*pud)) 302c2febafcSKirill A. Shutemov return NULL; 303c2febafcSKirill A. Shutemov pmd = pmd_offset(pud, addr); 304029c54b0SArd Biesheuvel WARN_ON_ONCE(pmd_bad(*pmd)); 305029c54b0SArd Biesheuvel if (pmd_none(*pmd) || pmd_bad(*pmd)) 306c2febafcSKirill A. Shutemov return NULL; 307db64fe02SNick Piggin 30848667e7aSChristoph Lameter ptep = pte_offset_map(pmd, addr); 30948667e7aSChristoph Lameter pte = *ptep; 31048667e7aSChristoph Lameter if (pte_present(pte)) 311add688fbSmalc page = pte_page(pte); 31248667e7aSChristoph Lameter pte_unmap(ptep); 313add688fbSmalc return page; 314ece86e22SJianyu Zhan } 315ece86e22SJianyu Zhan EXPORT_SYMBOL(vmalloc_to_page); 316ece86e22SJianyu Zhan 317add688fbSmalc /* 318add688fbSmalc * Map a vmalloc()-space virtual address to the physical page frame number. 319add688fbSmalc */ 320add688fbSmalc unsigned long vmalloc_to_pfn(const void *vmalloc_addr) 321add688fbSmalc { 322add688fbSmalc return page_to_pfn(vmalloc_to_page(vmalloc_addr)); 323add688fbSmalc } 324add688fbSmalc EXPORT_SYMBOL(vmalloc_to_pfn); 325add688fbSmalc 326db64fe02SNick Piggin 327db64fe02SNick Piggin /*** Global kva allocator ***/ 328db64fe02SNick Piggin 329bb850f4dSUladzislau Rezki (Sony) #define DEBUG_AUGMENT_PROPAGATE_CHECK 0 330a6cf4e0fSUladzislau Rezki (Sony) #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0 331bb850f4dSUladzislau Rezki (Sony) 33278c72746SYisheng Xie #define VM_LAZY_FREE 0x02 333db64fe02SNick Piggin #define VM_VM_AREA 0x04 334db64fe02SNick Piggin 335db64fe02SNick Piggin static DEFINE_SPINLOCK(vmap_area_lock); 336f1c4069eSJoonsoo Kim /* Export for kexec only */ 337f1c4069eSJoonsoo Kim LIST_HEAD(vmap_area_list); 33880c4bd7aSChris Wilson static LLIST_HEAD(vmap_purge_list); 33989699605SNick Piggin static struct rb_root vmap_area_root = RB_ROOT; 34068ad4a33SUladzislau Rezki (Sony) static bool vmap_initialized __read_mostly; 34189699605SNick Piggin 34268ad4a33SUladzislau Rezki (Sony) /* 34368ad4a33SUladzislau Rezki (Sony) * This kmem_cache is used for vmap_area objects. 
Instead of
34468ad4a33SUladzislau Rezki (Sony)  * allocating from slab we reuse an object from this cache to
34568ad4a33SUladzislau Rezki (Sony)  * make things faster, especially in the "no edge" splitting
34668ad4a33SUladzislau Rezki (Sony)  * of a free block.
34768ad4a33SUladzislau Rezki (Sony)  */
34868ad4a33SUladzislau Rezki (Sony) static struct kmem_cache *vmap_area_cachep;
34989699605SNick Piggin 
35068ad4a33SUladzislau Rezki (Sony) /*
35168ad4a33SUladzislau Rezki (Sony)  * This linked list is used together with free_vmap_area_root.
35268ad4a33SUladzislau Rezki (Sony)  * It gives O(1) access to prev/next to perform fast coalescing.
35368ad4a33SUladzislau Rezki (Sony)  */
35468ad4a33SUladzislau Rezki (Sony) static LIST_HEAD(free_vmap_area_list);
35568ad4a33SUladzislau Rezki (Sony) 
35668ad4a33SUladzislau Rezki (Sony) /*
35768ad4a33SUladzislau Rezki (Sony)  * This augmented red-black tree represents the free vmap space.
35868ad4a33SUladzislau Rezki (Sony)  * All vmap_area objects in this tree are sorted by va->va_start
35968ad4a33SUladzislau Rezki (Sony)  * address. It is used for allocation and merging when a vmap
36068ad4a33SUladzislau Rezki (Sony)  * object is released.
36168ad4a33SUladzislau Rezki (Sony)  *
36268ad4a33SUladzislau Rezki (Sony)  * Each vmap_area node contains a maximum available free block
36368ad4a33SUladzislau Rezki (Sony)  * of its sub-tree, right or left. Therefore it is possible to
36468ad4a33SUladzislau Rezki (Sony)  * find the lowest match of a free area.
36568ad4a33SUladzislau Rezki (Sony)  */
36668ad4a33SUladzislau Rezki (Sony) static struct rb_root free_vmap_area_root = RB_ROOT;
36768ad4a33SUladzislau Rezki (Sony) 
368*82dd23e8SUladzislau Rezki (Sony) /*
369*82dd23e8SUladzislau Rezki (Sony)  * Preload a CPU with one object for "no edge" split case. The
370*82dd23e8SUladzislau Rezki (Sony)  * aim is to get rid of allocations from the atomic context, thus
371*82dd23e8SUladzislau Rezki (Sony)  * to use more permissive allocation masks.
372*82dd23e8SUladzislau Rezki (Sony)  */
373*82dd23e8SUladzislau Rezki (Sony) static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
374*82dd23e8SUladzislau Rezki (Sony) 
37568ad4a33SUladzislau Rezki (Sony) static __always_inline unsigned long
37668ad4a33SUladzislau Rezki (Sony) va_size(struct vmap_area *va)
37768ad4a33SUladzislau Rezki (Sony) {
37868ad4a33SUladzislau Rezki (Sony) 	return (va->va_end - va->va_start);
37968ad4a33SUladzislau Rezki (Sony) }
38068ad4a33SUladzislau Rezki (Sony) 
38168ad4a33SUladzislau Rezki (Sony) static __always_inline unsigned long
38268ad4a33SUladzislau Rezki (Sony) get_subtree_max_size(struct rb_node *node)
38368ad4a33SUladzislau Rezki (Sony) {
38468ad4a33SUladzislau Rezki (Sony) 	struct vmap_area *va;
38568ad4a33SUladzislau Rezki (Sony) 
38668ad4a33SUladzislau Rezki (Sony) 	va = rb_entry_safe(node, struct vmap_area, rb_node);
38768ad4a33SUladzislau Rezki (Sony) 	return va ? va->subtree_max_size : 0;
38868ad4a33SUladzislau Rezki (Sony) }
38968ad4a33SUladzislau Rezki (Sony) 
39068ad4a33SUladzislau Rezki (Sony) /*
39168ad4a33SUladzislau Rezki (Sony)  * Gets called when a node is removed or the tree is rotated.
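 *
 * For illustration (example values, not from the source): a node
 * whose own size is 4 and whose children cache subtree_max_size
 * values of 2 and 8 recomputes max3(4, 2, 8) == 8.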
39268ad4a33SUladzislau Rezki (Sony)  */
39368ad4a33SUladzislau Rezki (Sony) static __always_inline unsigned long
39468ad4a33SUladzislau Rezki (Sony) compute_subtree_max_size(struct vmap_area *va)
39568ad4a33SUladzislau Rezki (Sony) {
39668ad4a33SUladzislau Rezki (Sony) 	return max3(va_size(va),
39768ad4a33SUladzislau Rezki (Sony) 		get_subtree_max_size(va->rb_node.rb_left),
39868ad4a33SUladzislau Rezki (Sony) 		get_subtree_max_size(va->rb_node.rb_right));
39968ad4a33SUladzislau Rezki (Sony) }
40068ad4a33SUladzislau Rezki (Sony) 
40168ad4a33SUladzislau Rezki (Sony) RB_DECLARE_CALLBACKS(static, free_vmap_area_rb_augment_cb,
40268ad4a33SUladzislau Rezki (Sony) 	struct vmap_area, rb_node, unsigned long, subtree_max_size,
40368ad4a33SUladzislau Rezki (Sony) 	compute_subtree_max_size)
40468ad4a33SUladzislau Rezki (Sony) 
40568ad4a33SUladzislau Rezki (Sony) static void purge_vmap_area_lazy(void);
40668ad4a33SUladzislau Rezki (Sony) static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
40768ad4a33SUladzislau Rezki (Sony) static unsigned long lazy_max_pages(void);
408db64fe02SNick Piggin 
409db64fe02SNick Piggin static struct vmap_area *__find_vmap_area(unsigned long addr)
4101da177e4SLinus Torvalds {
411db64fe02SNick Piggin 	struct rb_node *n = vmap_area_root.rb_node;
412db64fe02SNick Piggin 
413db64fe02SNick Piggin 	while (n) {
414db64fe02SNick Piggin 		struct vmap_area *va;
415db64fe02SNick Piggin 
416db64fe02SNick Piggin 		va = rb_entry(n, struct vmap_area, rb_node);
417db64fe02SNick Piggin 		if (addr < va->va_start)
418db64fe02SNick Piggin 			n = n->rb_left;
419cef2ac3fSHATAYAMA Daisuke 		else if (addr >= va->va_end)
420db64fe02SNick Piggin 			n = n->rb_right;
421db64fe02SNick Piggin 		else
422db64fe02SNick Piggin 			return va;
423db64fe02SNick Piggin 	}
424db64fe02SNick Piggin 
425db64fe02SNick Piggin 	return NULL;
426db64fe02SNick Piggin }
427db64fe02SNick Piggin 
42868ad4a33SUladzislau Rezki (Sony) /*
42968ad4a33SUladzislau Rezki (Sony)  * This function returns the address of the parent node and
43068ad4a33SUladzislau Rezki (Sony)  * of its left or right link for further processing.
43168ad4a33SUladzislau Rezki (Sony)  */
43268ad4a33SUladzislau Rezki (Sony) static __always_inline struct rb_node **
43368ad4a33SUladzislau Rezki (Sony) find_va_links(struct vmap_area *va,
43468ad4a33SUladzislau Rezki (Sony) 	struct rb_root *root, struct rb_node *from,
43568ad4a33SUladzislau Rezki (Sony) 	struct rb_node **parent)
436db64fe02SNick Piggin {
437170168d0SNamhyung Kim 	struct vmap_area *tmp_va;
43868ad4a33SUladzislau Rezki (Sony) 	struct rb_node **link;
439db64fe02SNick Piggin 
44068ad4a33SUladzislau Rezki (Sony) 	if (root) {
44168ad4a33SUladzislau Rezki (Sony) 		link = &root->rb_node;
44268ad4a33SUladzislau Rezki (Sony) 		if (unlikely(!*link)) {
44368ad4a33SUladzislau Rezki (Sony) 			*parent = NULL;
44468ad4a33SUladzislau Rezki (Sony) 			return link;
44568ad4a33SUladzislau Rezki (Sony) 		}
44668ad4a33SUladzislau Rezki (Sony) 	} else {
44768ad4a33SUladzislau Rezki (Sony) 		link = &from;
44868ad4a33SUladzislau Rezki (Sony) 	}
44968ad4a33SUladzislau Rezki (Sony) 
45068ad4a33SUladzislau Rezki (Sony) 	/*
45168ad4a33SUladzislau Rezki (Sony) 	 * Go to the bottom of the tree. When we hit the last point
45268ad4a33SUladzislau Rezki (Sony) 	 * we end up with the parent rb_node and the correct direction;
45368ad4a33SUladzislau Rezki (Sony) 	 * I name it "link", where the new va->rb_node will be attached.
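	 * The returned value is the address of the parent's child slot
	 * (rb_left or rb_right), so the caller can hand it directly to
	 * rb_link_node().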
45468ad4a33SUladzislau Rezki (Sony) 	 */
45568ad4a33SUladzislau Rezki (Sony) 	do {
45668ad4a33SUladzislau Rezki (Sony) 		tmp_va = rb_entry(*link, struct vmap_area, rb_node);
45768ad4a33SUladzislau Rezki (Sony) 
45868ad4a33SUladzislau Rezki (Sony) 		/*
45968ad4a33SUladzislau Rezki (Sony) 		 * During the traversal we also do some sanity checks.
46068ad4a33SUladzislau Rezki (Sony) 		 * Trigger the BUG() if the new area overlaps an existing
46168ad4a33SUladzislau Rezki (Sony) 		 * one, either on a side (left/right) or fully.
46268ad4a33SUladzislau Rezki (Sony) 		 */
46368ad4a33SUladzislau Rezki (Sony) 		if (va->va_start < tmp_va->va_end &&
46468ad4a33SUladzislau Rezki (Sony) 				va->va_end <= tmp_va->va_start)
46568ad4a33SUladzislau Rezki (Sony) 			link = &(*link)->rb_left;
46668ad4a33SUladzislau Rezki (Sony) 		else if (va->va_end > tmp_va->va_start &&
46768ad4a33SUladzislau Rezki (Sony) 				va->va_start >= tmp_va->va_end)
46868ad4a33SUladzislau Rezki (Sony) 			link = &(*link)->rb_right;
469db64fe02SNick Piggin 		else
470db64fe02SNick Piggin 			BUG();
47168ad4a33SUladzislau Rezki (Sony) 	} while (*link);
47268ad4a33SUladzislau Rezki (Sony) 
47368ad4a33SUladzislau Rezki (Sony) 	*parent = &tmp_va->rb_node;
47468ad4a33SUladzislau Rezki (Sony) 	return link;
475db64fe02SNick Piggin }
476db64fe02SNick Piggin 
47768ad4a33SUladzislau Rezki (Sony) static __always_inline struct list_head *
47868ad4a33SUladzislau Rezki (Sony) get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
47968ad4a33SUladzislau Rezki (Sony) {
48068ad4a33SUladzislau Rezki (Sony) 	struct list_head *list;
481db64fe02SNick Piggin 
48268ad4a33SUladzislau Rezki (Sony) 	if (unlikely(!parent))
48368ad4a33SUladzislau Rezki (Sony) 		/*
48468ad4a33SUladzislau Rezki (Sony) 		 * The red-black tree where we try to find VA neighbors
48568ad4a33SUladzislau Rezki (Sony) 		 * before merging or inserting is empty, i.e. there is
48668ad4a33SUladzislau Rezki (Sony) 		 * no free vmap space. Normally it does not happen but
48768ad4a33SUladzislau Rezki (Sony) 		 * we handle this case anyway.
48868ad4a33SUladzislau Rezki (Sony) 		 */
48968ad4a33SUladzislau Rezki (Sony) 		return NULL;
49068ad4a33SUladzislau Rezki (Sony) 
49168ad4a33SUladzislau Rezki (Sony) 	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
49268ad4a33SUladzislau Rezki (Sony) 	return (&parent->rb_right == link ? list->next : list);
493db64fe02SNick Piggin }
494db64fe02SNick Piggin 
49568ad4a33SUladzislau Rezki (Sony) static __always_inline void
49668ad4a33SUladzislau Rezki (Sony) link_va(struct vmap_area *va, struct rb_root *root,
49768ad4a33SUladzislau Rezki (Sony) 	struct rb_node *parent, struct rb_node **link, struct list_head *head)
49868ad4a33SUladzislau Rezki (Sony) {
49968ad4a33SUladzislau Rezki (Sony) 	/*
50068ad4a33SUladzislau Rezki (Sony) 	 * VA is still not in the list, but we can
50168ad4a33SUladzislau Rezki (Sony) 	 * identify its future previous list_head node.
50268ad4a33SUladzislau Rezki (Sony) 	 */
50368ad4a33SUladzislau Rezki (Sony) 	if (likely(parent)) {
50468ad4a33SUladzislau Rezki (Sony) 		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
50568ad4a33SUladzislau Rezki (Sony) 		if (&parent->rb_right != link)
50668ad4a33SUladzislau Rezki (Sony) 			head = head->prev;
50768ad4a33SUladzislau Rezki (Sony) 	}
508db64fe02SNick Piggin 
50968ad4a33SUladzislau Rezki (Sony) 	/* Insert into the rb-tree */
51068ad4a33SUladzislau Rezki (Sony) 	rb_link_node(&va->rb_node, parent, link);
51168ad4a33SUladzislau Rezki (Sony) 	if (root == &free_vmap_area_root) {
51268ad4a33SUladzislau Rezki (Sony) 		/*
51368ad4a33SUladzislau Rezki (Sony) 		 * Some explanation here.
Just perform simple insertion
51468ad4a33SUladzislau Rezki (Sony) 		 * to the tree. We do not set va->subtree_max_size to
51568ad4a33SUladzislau Rezki (Sony) 		 * its current size before calling rb_insert_augmented().
51668ad4a33SUladzislau Rezki (Sony) 		 * This is because we populate the tree from the bottom
51768ad4a33SUladzislau Rezki (Sony) 		 * to parent levels when the node _is_ in the tree.
51868ad4a33SUladzislau Rezki (Sony) 		 *
51968ad4a33SUladzislau Rezki (Sony) 		 * Therefore we set subtree_max_size to zero after insertion,
52068ad4a33SUladzislau Rezki (Sony) 		 * to let __augment_tree_propagate_from() put everything in
52168ad4a33SUladzislau Rezki (Sony) 		 * the correct order later on.
52268ad4a33SUladzislau Rezki (Sony) 		 */
52368ad4a33SUladzislau Rezki (Sony) 		rb_insert_augmented(&va->rb_node,
52468ad4a33SUladzislau Rezki (Sony) 			root, &free_vmap_area_rb_augment_cb);
52568ad4a33SUladzislau Rezki (Sony) 		va->subtree_max_size = 0;
52668ad4a33SUladzislau Rezki (Sony) 	} else {
52768ad4a33SUladzislau Rezki (Sony) 		rb_insert_color(&va->rb_node, root);
52868ad4a33SUladzislau Rezki (Sony) 	}
52968ad4a33SUladzislau Rezki (Sony) 
53068ad4a33SUladzislau Rezki (Sony) 	/* Address-sort this list */
53168ad4a33SUladzislau Rezki (Sony) 	list_add(&va->list, head);
53268ad4a33SUladzislau Rezki (Sony) }
53368ad4a33SUladzislau Rezki (Sony) 
53468ad4a33SUladzislau Rezki (Sony) static __always_inline void
53568ad4a33SUladzislau Rezki (Sony) unlink_va(struct vmap_area *va, struct rb_root *root)
53668ad4a33SUladzislau Rezki (Sony) {
53768ad4a33SUladzislau Rezki (Sony) 	/*
53868ad4a33SUladzislau Rezki (Sony) 	 * During merging a VA node can be empty, therefore
53968ad4a33SUladzislau Rezki (Sony) 	 * not linked with either the tree or the list. Just check it.
54068ad4a33SUladzislau Rezki (Sony) 	 */
54168ad4a33SUladzislau Rezki (Sony) 	if (!RB_EMPTY_NODE(&va->rb_node)) {
54268ad4a33SUladzislau Rezki (Sony) 		if (root == &free_vmap_area_root)
54368ad4a33SUladzislau Rezki (Sony) 			rb_erase_augmented(&va->rb_node,
54468ad4a33SUladzislau Rezki (Sony) 				root, &free_vmap_area_rb_augment_cb);
54568ad4a33SUladzislau Rezki (Sony) 		else
54668ad4a33SUladzislau Rezki (Sony) 			rb_erase(&va->rb_node, root);
54768ad4a33SUladzislau Rezki (Sony) 
54868ad4a33SUladzislau Rezki (Sony) 		list_del(&va->list);
54968ad4a33SUladzislau Rezki (Sony) 		RB_CLEAR_NODE(&va->rb_node);
55068ad4a33SUladzislau Rezki (Sony) 	}
55168ad4a33SUladzislau Rezki (Sony) }
55268ad4a33SUladzislau Rezki (Sony) 
553bb850f4dSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_PROPAGATE_CHECK
554bb850f4dSUladzislau Rezki (Sony) static void
555bb850f4dSUladzislau Rezki (Sony) augment_tree_propagate_check(struct rb_node *n)
556bb850f4dSUladzislau Rezki (Sony) {
557bb850f4dSUladzislau Rezki (Sony) 	struct vmap_area *va;
558bb850f4dSUladzislau Rezki (Sony) 	struct rb_node *node;
559bb850f4dSUladzislau Rezki (Sony) 	unsigned long size;
560bb850f4dSUladzislau Rezki (Sony) 	bool found = false;
561bb850f4dSUladzislau Rezki (Sony) 
562bb850f4dSUladzislau Rezki (Sony) 	if (n == NULL)
563bb850f4dSUladzislau Rezki (Sony) 		return;
564bb850f4dSUladzislau Rezki (Sony) 
565bb850f4dSUladzislau Rezki (Sony) 	va = rb_entry(n, struct vmap_area, rb_node);
566bb850f4dSUladzislau Rezki (Sony) 	size = va->subtree_max_size;
567bb850f4dSUladzislau Rezki (Sony) 	node = n;
568bb850f4dSUladzislau Rezki (Sony) 
569bb850f4dSUladzislau Rezki (Sony) 	while (node) {
570bb850f4dSUladzislau Rezki (Sony) 		va = rb_entry(node, struct vmap_area, rb_node);
571bb850f4dSUladzislau Rezki (Sony) 
572bb850f4dSUladzislau Rezki (Sony) 		if (get_subtree_max_size(node->rb_left) == size) {
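			/* The cached maximum lives in the left subtree; descend. */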
573bb850f4dSUladzislau Rezki (Sony) 			node = node->rb_left;
574bb850f4dSUladzislau Rezki (Sony) 		} else {
575bb850f4dSUladzislau Rezki (Sony) 			if (va_size(va) == size) {
576bb850f4dSUladzislau Rezki (Sony) 				found = true;
577bb850f4dSUladzislau Rezki (Sony) 				break;
578bb850f4dSUladzislau Rezki (Sony) 			}
579bb850f4dSUladzislau Rezki (Sony) 
580bb850f4dSUladzislau Rezki (Sony) 			node = node->rb_right;
581bb850f4dSUladzislau Rezki (Sony) 		}
582bb850f4dSUladzislau Rezki (Sony) 	}
583bb850f4dSUladzislau Rezki (Sony) 
584bb850f4dSUladzislau Rezki (Sony) 	if (!found) {
585bb850f4dSUladzislau Rezki (Sony) 		va = rb_entry(n, struct vmap_area, rb_node);
586bb850f4dSUladzislau Rezki (Sony) 		pr_emerg("tree is corrupted: %lu, %lu\n",
587bb850f4dSUladzislau Rezki (Sony) 			va_size(va), va->subtree_max_size);
588bb850f4dSUladzislau Rezki (Sony) 	}
589bb850f4dSUladzislau Rezki (Sony) 
590bb850f4dSUladzislau Rezki (Sony) 	augment_tree_propagate_check(n->rb_left);
591bb850f4dSUladzislau Rezki (Sony) 	augment_tree_propagate_check(n->rb_right);
592bb850f4dSUladzislau Rezki (Sony) }
593bb850f4dSUladzislau Rezki (Sony) #endif
594bb850f4dSUladzislau Rezki (Sony) 
59568ad4a33SUladzislau Rezki (Sony) /*
59668ad4a33SUladzislau Rezki (Sony)  * This function populates subtree_max_size from bottom to upper
59768ad4a33SUladzislau Rezki (Sony)  * levels starting from the VA point. The propagation must be done
59868ad4a33SUladzislau Rezki (Sony)  * when a VA's size is modified by changing its va_start/va_end, or
59968ad4a33SUladzislau Rezki (Sony)  * when a new VA is inserted into the tree.
60068ad4a33SUladzislau Rezki (Sony)  *
60168ad4a33SUladzislau Rezki (Sony)  * It means that __augment_tree_propagate_from() must be called:
60268ad4a33SUladzislau Rezki (Sony)  * - After a VA has been inserted into the tree (free path);
60368ad4a33SUladzislau Rezki (Sony)  * - After a VA has been shrunk (allocation path);
60468ad4a33SUladzislau Rezki (Sony)  * - After a VA has been increased (merging path).
60568ad4a33SUladzislau Rezki (Sony)  *
60668ad4a33SUladzislau Rezki (Sony)  * Note that this does not mean that upper parent nodes
60768ad4a33SUladzislau Rezki (Sony)  * and their subtree_max_size are recalculated all the time up
60868ad4a33SUladzislau Rezki (Sony)  * to the root node.
60968ad4a33SUladzislau Rezki (Sony)  *
61068ad4a33SUladzislau Rezki (Sony)  *      4--8
61168ad4a33SUladzislau Rezki (Sony)  *       /\
61268ad4a33SUladzislau Rezki (Sony)  *      /  \
61368ad4a33SUladzislau Rezki (Sony)  *     /    \
61468ad4a33SUladzislau Rezki (Sony)  *   2--2  8--8
61568ad4a33SUladzislau Rezki (Sony)  *
61668ad4a33SUladzislau Rezki (Sony)  * For example, if we modify node 4, shrinking it to 2, then
61768ad4a33SUladzislau Rezki (Sony)  * no modification is required. If we shrink node 2 to 1,
61868ad4a33SUladzislau Rezki (Sony)  * only its subtree_max_size is updated, and set to 1. If we shrink
61968ad4a33SUladzislau Rezki (Sony)  * node 8 to 6, then its subtree_max_size is set to 6 and the parent
62068ad4a33SUladzislau Rezki (Sony)  * node becomes 4--6.
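 *
 * In other words, the propagation loop stops at the first
 * ancestor whose cached subtree_max_size is already correct.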
62168ad4a33SUladzislau Rezki (Sony) */ 62268ad4a33SUladzislau Rezki (Sony) static __always_inline void 62368ad4a33SUladzislau Rezki (Sony) augment_tree_propagate_from(struct vmap_area *va) 62468ad4a33SUladzislau Rezki (Sony) { 62568ad4a33SUladzislau Rezki (Sony) struct rb_node *node = &va->rb_node; 62668ad4a33SUladzislau Rezki (Sony) unsigned long new_va_sub_max_size; 62768ad4a33SUladzislau Rezki (Sony) 62868ad4a33SUladzislau Rezki (Sony) while (node) { 62968ad4a33SUladzislau Rezki (Sony) va = rb_entry(node, struct vmap_area, rb_node); 63068ad4a33SUladzislau Rezki (Sony) new_va_sub_max_size = compute_subtree_max_size(va); 63168ad4a33SUladzislau Rezki (Sony) 63268ad4a33SUladzislau Rezki (Sony) /* 63368ad4a33SUladzislau Rezki (Sony) * If the newly calculated maximum available size of the 63468ad4a33SUladzislau Rezki (Sony) * subtree is equal to the current one, then it means that 63568ad4a33SUladzislau Rezki (Sony) * the tree is propagated correctly. So we have to stop at 63668ad4a33SUladzislau Rezki (Sony) * this point to save cycles. 63768ad4a33SUladzislau Rezki (Sony) */ 63868ad4a33SUladzislau Rezki (Sony) if (va->subtree_max_size == new_va_sub_max_size) 63968ad4a33SUladzislau Rezki (Sony) break; 64068ad4a33SUladzislau Rezki (Sony) 64168ad4a33SUladzislau Rezki (Sony) va->subtree_max_size = new_va_sub_max_size; 64268ad4a33SUladzislau Rezki (Sony) node = rb_parent(&va->rb_node); 64368ad4a33SUladzislau Rezki (Sony) } 644bb850f4dSUladzislau Rezki (Sony) 645bb850f4dSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_PROPAGATE_CHECK 646bb850f4dSUladzislau Rezki (Sony) augment_tree_propagate_check(free_vmap_area_root.rb_node); 647bb850f4dSUladzislau Rezki (Sony) #endif 64868ad4a33SUladzislau Rezki (Sony) } 64968ad4a33SUladzislau Rezki (Sony) 65068ad4a33SUladzislau Rezki (Sony) static void 65168ad4a33SUladzislau Rezki (Sony) insert_vmap_area(struct vmap_area *va, 65268ad4a33SUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head) 65368ad4a33SUladzislau Rezki (Sony) { 65468ad4a33SUladzislau Rezki (Sony) struct rb_node **link; 65568ad4a33SUladzislau Rezki (Sony) struct rb_node *parent; 65668ad4a33SUladzislau Rezki (Sony) 65768ad4a33SUladzislau Rezki (Sony) link = find_va_links(va, root, NULL, &parent); 65868ad4a33SUladzislau Rezki (Sony) link_va(va, root, parent, link, head); 65968ad4a33SUladzislau Rezki (Sony) } 66068ad4a33SUladzislau Rezki (Sony) 66168ad4a33SUladzislau Rezki (Sony) static void 66268ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(struct vmap_area *va, 66368ad4a33SUladzislau Rezki (Sony) struct rb_node *from, struct rb_root *root, 66468ad4a33SUladzislau Rezki (Sony) struct list_head *head) 66568ad4a33SUladzislau Rezki (Sony) { 66668ad4a33SUladzislau Rezki (Sony) struct rb_node **link; 66768ad4a33SUladzislau Rezki (Sony) struct rb_node *parent; 66868ad4a33SUladzislau Rezki (Sony) 66968ad4a33SUladzislau Rezki (Sony) if (from) 67068ad4a33SUladzislau Rezki (Sony) link = find_va_links(va, NULL, from, &parent); 67168ad4a33SUladzislau Rezki (Sony) else 67268ad4a33SUladzislau Rezki (Sony) link = find_va_links(va, root, NULL, &parent); 67368ad4a33SUladzislau Rezki (Sony) 67468ad4a33SUladzislau Rezki (Sony) link_va(va, root, parent, link, head); 67568ad4a33SUladzislau Rezki (Sony) augment_tree_propagate_from(va); 67668ad4a33SUladzislau Rezki (Sony) } 67768ad4a33SUladzislau Rezki (Sony) 67868ad4a33SUladzislau Rezki (Sony) /* 67968ad4a33SUladzislau Rezki (Sony) * Merge de-allocated chunk of VA memory with previous 68068ad4a33SUladzislau Rezki (Sony) * and next free blocks. 
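 * Merging is attempted in both directions: first with the
 * next block (sibling->va_start == va->va_end), then with
 * the previous one (sibling->va_end == va->va_start).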
If coalescing is not done, a new
68168ad4a33SUladzislau Rezki (Sony)  * free area is inserted. If the VA has been merged, it is
68268ad4a33SUladzislau Rezki (Sony)  * freed.
68368ad4a33SUladzislau Rezki (Sony)  */
68468ad4a33SUladzislau Rezki (Sony) static __always_inline void
68568ad4a33SUladzislau Rezki (Sony) merge_or_add_vmap_area(struct vmap_area *va,
68668ad4a33SUladzislau Rezki (Sony) 	struct rb_root *root, struct list_head *head)
68768ad4a33SUladzislau Rezki (Sony) {
68868ad4a33SUladzislau Rezki (Sony) 	struct vmap_area *sibling;
68968ad4a33SUladzislau Rezki (Sony) 	struct list_head *next;
69068ad4a33SUladzislau Rezki (Sony) 	struct rb_node **link;
69168ad4a33SUladzislau Rezki (Sony) 	struct rb_node *parent;
69268ad4a33SUladzislau Rezki (Sony) 	bool merged = false;
69368ad4a33SUladzislau Rezki (Sony) 
69468ad4a33SUladzislau Rezki (Sony) 	/*
69568ad4a33SUladzislau Rezki (Sony) 	 * Find a place in the tree where VA potentially will be
69668ad4a33SUladzislau Rezki (Sony) 	 * inserted, unless it is merged with its sibling/siblings.
69768ad4a33SUladzislau Rezki (Sony) 	 */
69868ad4a33SUladzislau Rezki (Sony) 	link = find_va_links(va, root, NULL, &parent);
69968ad4a33SUladzislau Rezki (Sony) 
70068ad4a33SUladzislau Rezki (Sony) 	/*
70168ad4a33SUladzislau Rezki (Sony) 	 * Get the next node of VA to check if merging can be done.
70268ad4a33SUladzislau Rezki (Sony) 	 */
70368ad4a33SUladzislau Rezki (Sony) 	next = get_va_next_sibling(parent, link);
70468ad4a33SUladzislau Rezki (Sony) 	if (unlikely(next == NULL))
70568ad4a33SUladzislau Rezki (Sony) 		goto insert;
70668ad4a33SUladzislau Rezki (Sony) 
70768ad4a33SUladzislau Rezki (Sony) 	/*
70868ad4a33SUladzislau Rezki (Sony) 	 * start            end
70968ad4a33SUladzislau Rezki (Sony) 	 * |                |
71068ad4a33SUladzislau Rezki (Sony) 	 * |<------VA------>|<-----Next----->|
71168ad4a33SUladzislau Rezki (Sony) 	 * |                |
71268ad4a33SUladzislau Rezki (Sony) 	 * start            end
71368ad4a33SUladzislau Rezki (Sony) 	 */
71468ad4a33SUladzislau Rezki (Sony) 	if (next != head) {
71568ad4a33SUladzislau Rezki (Sony) 		sibling = list_entry(next, struct vmap_area, list);
71668ad4a33SUladzislau Rezki (Sony) 		if (sibling->va_start == va->va_end) {
71768ad4a33SUladzislau Rezki (Sony) 			sibling->va_start = va->va_start;
71868ad4a33SUladzislau Rezki (Sony) 
71968ad4a33SUladzislau Rezki (Sony) 			/* Check and update the tree if needed. */
72068ad4a33SUladzislau Rezki (Sony) 			augment_tree_propagate_from(sibling);
72168ad4a33SUladzislau Rezki (Sony) 
72268ad4a33SUladzislau Rezki (Sony) 			/* Remove this VA, it has been merged. */
72368ad4a33SUladzislau Rezki (Sony) 			unlink_va(va, root);
72468ad4a33SUladzislau Rezki (Sony) 
72568ad4a33SUladzislau Rezki (Sony) 			/* Free vmap_area object. */
72668ad4a33SUladzislau Rezki (Sony) 			kmem_cache_free(vmap_area_cachep, va);
72768ad4a33SUladzislau Rezki (Sony) 
72868ad4a33SUladzislau Rezki (Sony) 			/* Point to the new merged area.
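			 * A subsequent merge with the previous block below
			 * can then coalesce all three areas.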
			 */
72968ad4a33SUladzislau Rezki (Sony) 			va = sibling;
73068ad4a33SUladzislau Rezki (Sony) 			merged = true;
73168ad4a33SUladzislau Rezki (Sony) 		}
73268ad4a33SUladzislau Rezki (Sony) 	}
73368ad4a33SUladzislau Rezki (Sony) 
73468ad4a33SUladzislau Rezki (Sony) 	/*
73568ad4a33SUladzislau Rezki (Sony) 	 * start            end
73668ad4a33SUladzislau Rezki (Sony) 	 * |                |
73768ad4a33SUladzislau Rezki (Sony) 	 * |<-----Prev----->|<------VA------>|
73868ad4a33SUladzislau Rezki (Sony) 	 *                  |                |
73968ad4a33SUladzislau Rezki (Sony) 	 *                  start            end
74068ad4a33SUladzislau Rezki (Sony) 	 */
74168ad4a33SUladzislau Rezki (Sony) 	if (next->prev != head) {
74268ad4a33SUladzislau Rezki (Sony) 		sibling = list_entry(next->prev, struct vmap_area, list);
74368ad4a33SUladzislau Rezki (Sony) 		if (sibling->va_end == va->va_start) {
74468ad4a33SUladzislau Rezki (Sony) 			sibling->va_end = va->va_end;
74568ad4a33SUladzislau Rezki (Sony) 
74668ad4a33SUladzislau Rezki (Sony) 			/* Check and update the tree if needed. */
74768ad4a33SUladzislau Rezki (Sony) 			augment_tree_propagate_from(sibling);
74868ad4a33SUladzislau Rezki (Sony) 
74968ad4a33SUladzislau Rezki (Sony) 			/* Remove this VA, it has been merged. */
75068ad4a33SUladzislau Rezki (Sony) 			unlink_va(va, root);
75168ad4a33SUladzislau Rezki (Sony) 
75268ad4a33SUladzislau Rezki (Sony) 			/* Free vmap_area object. */
75368ad4a33SUladzislau Rezki (Sony) 			kmem_cache_free(vmap_area_cachep, va);
75468ad4a33SUladzislau Rezki (Sony) 
75568ad4a33SUladzislau Rezki (Sony) 			return;
75668ad4a33SUladzislau Rezki (Sony) 		}
75768ad4a33SUladzislau Rezki (Sony) 	}
75868ad4a33SUladzislau Rezki (Sony) 
75968ad4a33SUladzislau Rezki (Sony) insert:
76068ad4a33SUladzislau Rezki (Sony) 	if (!merged) {
76168ad4a33SUladzislau Rezki (Sony) 		link_va(va, root, parent, link, head);
76268ad4a33SUladzislau Rezki (Sony) 		augment_tree_propagate_from(va);
76368ad4a33SUladzislau Rezki (Sony) 	}
76468ad4a33SUladzislau Rezki (Sony) }
76568ad4a33SUladzislau Rezki (Sony) 
76668ad4a33SUladzislau Rezki (Sony) static __always_inline bool
76768ad4a33SUladzislau Rezki (Sony) is_within_this_va(struct vmap_area *va, unsigned long size,
76868ad4a33SUladzislau Rezki (Sony) 	unsigned long align, unsigned long vstart)
76968ad4a33SUladzislau Rezki (Sony) {
77068ad4a33SUladzislau Rezki (Sony) 	unsigned long nva_start_addr;
77168ad4a33SUladzislau Rezki (Sony) 
77268ad4a33SUladzislau Rezki (Sony) 	if (va->va_start > vstart)
77368ad4a33SUladzislau Rezki (Sony) 		nva_start_addr = ALIGN(va->va_start, align);
77468ad4a33SUladzislau Rezki (Sony) 	else
77568ad4a33SUladzislau Rezki (Sony) 		nva_start_addr = ALIGN(vstart, align);
77668ad4a33SUladzislau Rezki (Sony) 
77768ad4a33SUladzislau Rezki (Sony) 	/* Can overflow due to a big size or alignment. */
77868ad4a33SUladzislau Rezki (Sony) 	if (nva_start_addr + size < nva_start_addr ||
77968ad4a33SUladzislau Rezki (Sony) 			nva_start_addr < vstart)
78068ad4a33SUladzislau Rezki (Sony) 		return false;
78168ad4a33SUladzislau Rezki (Sony) 
78268ad4a33SUladzislau Rezki (Sony) 	return (nva_start_addr + size <= va->va_end);
78368ad4a33SUladzislau Rezki (Sony) }
78468ad4a33SUladzislau Rezki (Sony) 
78568ad4a33SUladzislau Rezki (Sony) /*
78668ad4a33SUladzislau Rezki (Sony)  * Find the first free block (lowest start address) in the tree
78768ad4a33SUladzislau Rezki (Sony)  * that will satisfy the request for the passed
78868ad4a33SUladzislau Rezki (Sony)  * parameters.
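 *
 * The walk below is guided by subtree_max_size: descend left while
 * the left subtree can hold the request, otherwise try the current
 * node and then the right subtree.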
78968ad4a33SUladzislau Rezki (Sony)  */
79068ad4a33SUladzislau Rezki (Sony) static __always_inline struct vmap_area *
79168ad4a33SUladzislau Rezki (Sony) find_vmap_lowest_match(unsigned long size,
79268ad4a33SUladzislau Rezki (Sony) 	unsigned long align, unsigned long vstart)
79368ad4a33SUladzislau Rezki (Sony) {
79468ad4a33SUladzislau Rezki (Sony) 	struct vmap_area *va;
79568ad4a33SUladzislau Rezki (Sony) 	struct rb_node *node;
79668ad4a33SUladzislau Rezki (Sony) 	unsigned long length;
79768ad4a33SUladzislau Rezki (Sony) 
79868ad4a33SUladzislau Rezki (Sony) 	/* Start from the root. */
79968ad4a33SUladzislau Rezki (Sony) 	node = free_vmap_area_root.rb_node;
80068ad4a33SUladzislau Rezki (Sony) 
80168ad4a33SUladzislau Rezki (Sony) 	/* Adjust the search size for alignment overhead. */
80268ad4a33SUladzislau Rezki (Sony) 	length = size + align - 1;
80368ad4a33SUladzislau Rezki (Sony) 
80468ad4a33SUladzislau Rezki (Sony) 	while (node) {
80568ad4a33SUladzislau Rezki (Sony) 		va = rb_entry(node, struct vmap_area, rb_node);
80668ad4a33SUladzislau Rezki (Sony) 
80768ad4a33SUladzislau Rezki (Sony) 		if (get_subtree_max_size(node->rb_left) >= length &&
80868ad4a33SUladzislau Rezki (Sony) 				vstart < va->va_start) {
80968ad4a33SUladzislau Rezki (Sony) 			node = node->rb_left;
81068ad4a33SUladzislau Rezki (Sony) 		} else {
81168ad4a33SUladzislau Rezki (Sony) 			if (is_within_this_va(va, size, align, vstart))
81268ad4a33SUladzislau Rezki (Sony) 				return va;
81368ad4a33SUladzislau Rezki (Sony) 
81468ad4a33SUladzislau Rezki (Sony) 			/*
81568ad4a33SUladzislau Rezki (Sony) 			 * It does not make sense to go deeper into the right
81668ad4a33SUladzislau Rezki (Sony) 			 * sub-tree if it does not have a free block that is
81768ad4a33SUladzislau Rezki (Sony) 			 * equal to or bigger than the requested search length.
81868ad4a33SUladzislau Rezki (Sony) 			 */
81968ad4a33SUladzislau Rezki (Sony) 			if (get_subtree_max_size(node->rb_right) >= length) {
82068ad4a33SUladzislau Rezki (Sony) 				node = node->rb_right;
82168ad4a33SUladzislau Rezki (Sony) 				continue;
82268ad4a33SUladzislau Rezki (Sony) 			}
82368ad4a33SUladzislau Rezki (Sony) 
82468ad4a33SUladzislau Rezki (Sony) 			/*
8253806b041SAndrew Morton 			 * OK. We roll back and find the first right sub-tree,
82668ad4a33SUladzislau Rezki (Sony) 			 * that will satisfy the search criteria. It can happen
82768ad4a33SUladzislau Rezki (Sony) 			 * only once due to the "vstart" restriction.
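			 * The climb stops at the first ancestor whose right
			 * subtree is big enough and whose start address does
			 * not violate the "vstart" limit.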
82868ad4a33SUladzislau Rezki (Sony) */ 82968ad4a33SUladzislau Rezki (Sony) while ((node = rb_parent(node))) { 83068ad4a33SUladzislau Rezki (Sony) va = rb_entry(node, struct vmap_area, rb_node); 83168ad4a33SUladzislau Rezki (Sony) if (is_within_this_va(va, size, align, vstart)) 83268ad4a33SUladzislau Rezki (Sony) return va; 83368ad4a33SUladzislau Rezki (Sony) 83468ad4a33SUladzislau Rezki (Sony) if (get_subtree_max_size(node->rb_right) >= length && 83568ad4a33SUladzislau Rezki (Sony) vstart <= va->va_start) { 83668ad4a33SUladzislau Rezki (Sony) node = node->rb_right; 83768ad4a33SUladzislau Rezki (Sony) break; 83868ad4a33SUladzislau Rezki (Sony) } 83968ad4a33SUladzislau Rezki (Sony) } 84068ad4a33SUladzislau Rezki (Sony) } 84168ad4a33SUladzislau Rezki (Sony) } 84268ad4a33SUladzislau Rezki (Sony) 84368ad4a33SUladzislau Rezki (Sony) return NULL; 84468ad4a33SUladzislau Rezki (Sony) } 84568ad4a33SUladzislau Rezki (Sony) 846a6cf4e0fSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK 847a6cf4e0fSUladzislau Rezki (Sony) #include <linux/random.h> 848a6cf4e0fSUladzislau Rezki (Sony) 849a6cf4e0fSUladzislau Rezki (Sony) static struct vmap_area * 850a6cf4e0fSUladzislau Rezki (Sony) find_vmap_lowest_linear_match(unsigned long size, 851a6cf4e0fSUladzislau Rezki (Sony) unsigned long align, unsigned long vstart) 852a6cf4e0fSUladzislau Rezki (Sony) { 853a6cf4e0fSUladzislau Rezki (Sony) struct vmap_area *va; 854a6cf4e0fSUladzislau Rezki (Sony) 855a6cf4e0fSUladzislau Rezki (Sony) list_for_each_entry(va, &free_vmap_area_list, list) { 856a6cf4e0fSUladzislau Rezki (Sony) if (!is_within_this_va(va, size, align, vstart)) 857a6cf4e0fSUladzislau Rezki (Sony) continue; 858a6cf4e0fSUladzislau Rezki (Sony) 859a6cf4e0fSUladzislau Rezki (Sony) return va; 860a6cf4e0fSUladzislau Rezki (Sony) } 861a6cf4e0fSUladzislau Rezki (Sony) 862a6cf4e0fSUladzislau Rezki (Sony) return NULL; 863a6cf4e0fSUladzislau Rezki (Sony) } 864a6cf4e0fSUladzislau Rezki (Sony) 865a6cf4e0fSUladzislau Rezki (Sony) static void 866a6cf4e0fSUladzislau Rezki (Sony) find_vmap_lowest_match_check(unsigned long size) 867a6cf4e0fSUladzislau Rezki (Sony) { 868a6cf4e0fSUladzislau Rezki (Sony) struct vmap_area *va_1, *va_2; 869a6cf4e0fSUladzislau Rezki (Sony) unsigned long vstart; 870a6cf4e0fSUladzislau Rezki (Sony) unsigned int rnd; 871a6cf4e0fSUladzislau Rezki (Sony) 872a6cf4e0fSUladzislau Rezki (Sony) get_random_bytes(&rnd, sizeof(rnd)); 873a6cf4e0fSUladzislau Rezki (Sony) vstart = VMALLOC_START + rnd; 874a6cf4e0fSUladzislau Rezki (Sony) 875a6cf4e0fSUladzislau Rezki (Sony) va_1 = find_vmap_lowest_match(size, 1, vstart); 876a6cf4e0fSUladzislau Rezki (Sony) va_2 = find_vmap_lowest_linear_match(size, 1, vstart); 877a6cf4e0fSUladzislau Rezki (Sony) 878a6cf4e0fSUladzislau Rezki (Sony) if (va_1 != va_2) 879a6cf4e0fSUladzislau Rezki (Sony) pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n", 880a6cf4e0fSUladzislau Rezki (Sony) va_1, va_2, vstart); 881a6cf4e0fSUladzislau Rezki (Sony) } 882a6cf4e0fSUladzislau Rezki (Sony) #endif 883a6cf4e0fSUladzislau Rezki (Sony) 88468ad4a33SUladzislau Rezki (Sony) enum fit_type { 88568ad4a33SUladzislau Rezki (Sony) NOTHING_FIT = 0, 88668ad4a33SUladzislau Rezki (Sony) FL_FIT_TYPE = 1, /* full fit */ 88768ad4a33SUladzislau Rezki (Sony) LE_FIT_TYPE = 2, /* left edge fit */ 88868ad4a33SUladzislau Rezki (Sony) RE_FIT_TYPE = 3, /* right edge fit */ 88968ad4a33SUladzislau Rezki (Sony) NE_FIT_TYPE = 4 /* no edge fit */ 89068ad4a33SUladzislau Rezki (Sony) }; 89168ad4a33SUladzislau Rezki (Sony) 89268ad4a33SUladzislau Rezki (Sony) static 
__always_inline enum fit_type 89368ad4a33SUladzislau Rezki (Sony) classify_va_fit_type(struct vmap_area *va, 89468ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr, unsigned long size) 89568ad4a33SUladzislau Rezki (Sony) { 89668ad4a33SUladzislau Rezki (Sony) enum fit_type type; 89768ad4a33SUladzislau Rezki (Sony) 89868ad4a33SUladzislau Rezki (Sony) /* Check if it is within VA. */ 89968ad4a33SUladzislau Rezki (Sony) if (nva_start_addr < va->va_start || 90068ad4a33SUladzislau Rezki (Sony) nva_start_addr + size > va->va_end) 90168ad4a33SUladzislau Rezki (Sony) return NOTHING_FIT; 90268ad4a33SUladzislau Rezki (Sony) 90368ad4a33SUladzislau Rezki (Sony) /* Now classify. */ 90468ad4a33SUladzislau Rezki (Sony) if (va->va_start == nva_start_addr) { 90568ad4a33SUladzislau Rezki (Sony) if (va->va_end == nva_start_addr + size) 90668ad4a33SUladzislau Rezki (Sony) type = FL_FIT_TYPE; 90768ad4a33SUladzislau Rezki (Sony) else 90868ad4a33SUladzislau Rezki (Sony) type = LE_FIT_TYPE; 90968ad4a33SUladzislau Rezki (Sony) } else if (va->va_end == nva_start_addr + size) { 91068ad4a33SUladzislau Rezki (Sony) type = RE_FIT_TYPE; 91168ad4a33SUladzislau Rezki (Sony) } else { 91268ad4a33SUladzislau Rezki (Sony) type = NE_FIT_TYPE; 91368ad4a33SUladzislau Rezki (Sony) } 91468ad4a33SUladzislau Rezki (Sony) 91568ad4a33SUladzislau Rezki (Sony) return type; 91668ad4a33SUladzislau Rezki (Sony) } 91768ad4a33SUladzislau Rezki (Sony) 91868ad4a33SUladzislau Rezki (Sony) static __always_inline int 91968ad4a33SUladzislau Rezki (Sony) adjust_va_to_fit_type(struct vmap_area *va, 92068ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr, unsigned long size, 92168ad4a33SUladzislau Rezki (Sony) enum fit_type type) 92268ad4a33SUladzislau Rezki (Sony) { 9232c929233SArnd Bergmann struct vmap_area *lva = NULL; 92468ad4a33SUladzislau Rezki (Sony) 92568ad4a33SUladzislau Rezki (Sony) if (type == FL_FIT_TYPE) { 92668ad4a33SUladzislau Rezki (Sony) /* 92768ad4a33SUladzislau Rezki (Sony) * No need to split VA, it fully fits. 92868ad4a33SUladzislau Rezki (Sony) * 92968ad4a33SUladzislau Rezki (Sony) * | | 93068ad4a33SUladzislau Rezki (Sony) * V NVA V 93168ad4a33SUladzislau Rezki (Sony) * |---------------| 93268ad4a33SUladzislau Rezki (Sony) */ 93368ad4a33SUladzislau Rezki (Sony) unlink_va(va, &free_vmap_area_root); 93468ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va); 93568ad4a33SUladzislau Rezki (Sony) } else if (type == LE_FIT_TYPE) { 93668ad4a33SUladzislau Rezki (Sony) /* 93768ad4a33SUladzislau Rezki (Sony) * Split left edge of fit VA. 93868ad4a33SUladzislau Rezki (Sony) * 93968ad4a33SUladzislau Rezki (Sony) * | | 94068ad4a33SUladzislau Rezki (Sony) * V NVA V R 94168ad4a33SUladzislau Rezki (Sony) * |-------|-------| 94268ad4a33SUladzislau Rezki (Sony) */ 94368ad4a33SUladzislau Rezki (Sony) va->va_start += size; 94468ad4a33SUladzislau Rezki (Sony) } else if (type == RE_FIT_TYPE) { 94568ad4a33SUladzislau Rezki (Sony) /* 94668ad4a33SUladzislau Rezki (Sony) * Split right edge of fit VA. 94768ad4a33SUladzislau Rezki (Sony) * 94868ad4a33SUladzislau Rezki (Sony) * | | 94968ad4a33SUladzislau Rezki (Sony) * L V NVA V 95068ad4a33SUladzislau Rezki (Sony) * |-------|-------| 95168ad4a33SUladzislau Rezki (Sony) */ 95268ad4a33SUladzislau Rezki (Sony) va->va_end = nva_start_addr; 95368ad4a33SUladzislau Rezki (Sony) } else if (type == NE_FIT_TYPE) { 95468ad4a33SUladzislau Rezki (Sony) /* 95568ad4a33SUladzislau Rezki (Sony) * Split no edge of fit VA. 
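		 * The new area is carved out of the middle, leaving
		 * a remainder on each side: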
95668ad4a33SUladzislau Rezki (Sony) 		 *
95768ad4a33SUladzislau Rezki (Sony) 		 *     |       |
95868ad4a33SUladzislau Rezki (Sony) 		 *   L V  NVA  V R
95968ad4a33SUladzislau Rezki (Sony) 		 * |---|-------|---|
96068ad4a33SUladzislau Rezki (Sony) 		 */
961*82dd23e8SUladzislau Rezki (Sony) 		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
962*82dd23e8SUladzislau Rezki (Sony) 		if (unlikely(!lva)) {
963*82dd23e8SUladzislau Rezki (Sony) 			/*
964*82dd23e8SUladzislau Rezki (Sony) 			 * For the percpu allocator we do not do any pre-allocation
965*82dd23e8SUladzislau Rezki (Sony) 			 * and leave it as is. The reason is that it most likely
966*82dd23e8SUladzislau Rezki (Sony) 			 * never ends up with NE_FIT_TYPE splitting. In the case of
967*82dd23e8SUladzislau Rezki (Sony) 			 * percpu allocations, offsets and sizes are aligned to a
968*82dd23e8SUladzislau Rezki (Sony) 			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
969*82dd23e8SUladzislau Rezki (Sony) 			 * are its main fitting cases.
970*82dd23e8SUladzislau Rezki (Sony) 			 *
971*82dd23e8SUladzislau Rezki (Sony) 			 * There are a few exceptions though; one example is the
972*82dd23e8SUladzislau Rezki (Sony) 			 * first allocation (early boot-up), when we have "one"
973*82dd23e8SUladzislau Rezki (Sony) 			 * big free space that has to be split.
974*82dd23e8SUladzislau Rezki (Sony) 			 */
97568ad4a33SUladzislau Rezki (Sony) 			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
976*82dd23e8SUladzislau Rezki (Sony) 			if (!lva)
97768ad4a33SUladzislau Rezki (Sony) 				return -1;
978*82dd23e8SUladzislau Rezki (Sony) 		}
97968ad4a33SUladzislau Rezki (Sony) 
98068ad4a33SUladzislau Rezki (Sony) 		/*
98168ad4a33SUladzislau Rezki (Sony) 		 * Build the remainder.
98268ad4a33SUladzislau Rezki (Sony) 		 */
98368ad4a33SUladzislau Rezki (Sony) 		lva->va_start = va->va_start;
98468ad4a33SUladzislau Rezki (Sony) 		lva->va_end = nva_start_addr;
98568ad4a33SUladzislau Rezki (Sony) 
98668ad4a33SUladzislau Rezki (Sony) 		/*
98768ad4a33SUladzislau Rezki (Sony) 		 * Shrink this VA to remaining size.
98868ad4a33SUladzislau Rezki (Sony) 		 */
98968ad4a33SUladzislau Rezki (Sony) 		va->va_start = nva_start_addr + size;
99068ad4a33SUladzislau Rezki (Sony) 	} else {
99168ad4a33SUladzislau Rezki (Sony) 		return -1;
99268ad4a33SUladzislau Rezki (Sony) 	}
99368ad4a33SUladzislau Rezki (Sony) 
99468ad4a33SUladzislau Rezki (Sony) 	if (type != FL_FIT_TYPE) {
99568ad4a33SUladzislau Rezki (Sony) 		augment_tree_propagate_from(va);
99668ad4a33SUladzislau Rezki (Sony) 
9972c929233SArnd Bergmann 		if (lva)	/* type == NE_FIT_TYPE */
99868ad4a33SUladzislau Rezki (Sony) 			insert_vmap_area_augment(lva, &va->rb_node,
99968ad4a33SUladzislau Rezki (Sony) 				&free_vmap_area_root, &free_vmap_area_list);
100068ad4a33SUladzislau Rezki (Sony) 	}
100168ad4a33SUladzislau Rezki (Sony) 
100268ad4a33SUladzislau Rezki (Sony) 	return 0;
100368ad4a33SUladzislau Rezki (Sony) }
100468ad4a33SUladzislau Rezki (Sony) 
100568ad4a33SUladzislau Rezki (Sony) /*
100668ad4a33SUladzislau Rezki (Sony)  * Returns the start address of the newly allocated area on success.
100768ad4a33SUladzislau Rezki (Sony)  * Otherwise "vend" is returned to indicate failure.
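 * Callers detect failure by comparing the returned address
 * against "vend".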
100868ad4a33SUladzislau Rezki (Sony) */ 100968ad4a33SUladzislau Rezki (Sony) static __always_inline unsigned long 101068ad4a33SUladzislau Rezki (Sony) __alloc_vmap_area(unsigned long size, unsigned long align, 1011cacca6baSUladzislau Rezki (Sony) unsigned long vstart, unsigned long vend) 101268ad4a33SUladzislau Rezki (Sony) { 101368ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr; 101468ad4a33SUladzislau Rezki (Sony) struct vmap_area *va; 101568ad4a33SUladzislau Rezki (Sony) enum fit_type type; 101668ad4a33SUladzislau Rezki (Sony) int ret; 101768ad4a33SUladzislau Rezki (Sony) 101868ad4a33SUladzislau Rezki (Sony) va = find_vmap_lowest_match(size, align, vstart); 101968ad4a33SUladzislau Rezki (Sony) if (unlikely(!va)) 102068ad4a33SUladzislau Rezki (Sony) return vend; 102168ad4a33SUladzislau Rezki (Sony) 102268ad4a33SUladzislau Rezki (Sony) if (va->va_start > vstart) 102368ad4a33SUladzislau Rezki (Sony) nva_start_addr = ALIGN(va->va_start, align); 102468ad4a33SUladzislau Rezki (Sony) else 102568ad4a33SUladzislau Rezki (Sony) nva_start_addr = ALIGN(vstart, align); 102668ad4a33SUladzislau Rezki (Sony) 102768ad4a33SUladzislau Rezki (Sony) /* Check the "vend" restriction. */ 102868ad4a33SUladzislau Rezki (Sony) if (nva_start_addr + size > vend) 102968ad4a33SUladzislau Rezki (Sony) return vend; 103068ad4a33SUladzislau Rezki (Sony) 103168ad4a33SUladzislau Rezki (Sony) /* Classify what we have found. */ 103268ad4a33SUladzislau Rezki (Sony) type = classify_va_fit_type(va, nva_start_addr, size); 103368ad4a33SUladzislau Rezki (Sony) if (WARN_ON_ONCE(type == NOTHING_FIT)) 103468ad4a33SUladzislau Rezki (Sony) return vend; 103568ad4a33SUladzislau Rezki (Sony) 103668ad4a33SUladzislau Rezki (Sony) /* Update the free vmap_area. */ 103768ad4a33SUladzislau Rezki (Sony) ret = adjust_va_to_fit_type(va, nva_start_addr, size, type); 103868ad4a33SUladzislau Rezki (Sony) if (ret) 103968ad4a33SUladzislau Rezki (Sony) return vend; 104068ad4a33SUladzislau Rezki (Sony) 1041a6cf4e0fSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK 1042a6cf4e0fSUladzislau Rezki (Sony) find_vmap_lowest_match_check(size); 1043a6cf4e0fSUladzislau Rezki (Sony) #endif 1044a6cf4e0fSUladzislau Rezki (Sony) 104568ad4a33SUladzislau Rezki (Sony) return nva_start_addr; 104668ad4a33SUladzislau Rezki (Sony) } 10474da56b99SChris Wilson 1048db64fe02SNick Piggin /* 1049db64fe02SNick Piggin * Allocate a region of KVA of the specified size and alignment, within the 1050db64fe02SNick Piggin * vstart and vend. 
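 *
 * For the regular vmalloc space, for example, callers pass
 * VMALLOC_START and VMALLOC_END as the boundaries.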
1051db64fe02SNick Piggin */
1052db64fe02SNick Piggin static struct vmap_area *alloc_vmap_area(unsigned long size,
1053db64fe02SNick Piggin unsigned long align,
1054db64fe02SNick Piggin unsigned long vstart, unsigned long vend,
1055db64fe02SNick Piggin int node, gfp_t gfp_mask)
1056db64fe02SNick Piggin {
1057*82dd23e8SUladzislau Rezki (Sony) struct vmap_area *va, *pva;
10581da177e4SLinus Torvalds unsigned long addr;
1059db64fe02SNick Piggin int purged = 0;
1060db64fe02SNick Piggin
10617766970cSNick Piggin BUG_ON(!size);
1062891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size));
106389699605SNick Piggin BUG_ON(!is_power_of_2(align));
1064db64fe02SNick Piggin
106568ad4a33SUladzislau Rezki (Sony) if (unlikely(!vmap_initialized))
106668ad4a33SUladzislau Rezki (Sony) return ERR_PTR(-EBUSY);
106768ad4a33SUladzislau Rezki (Sony)
10685803ed29SChristoph Hellwig might_sleep();
10694da56b99SChris Wilson
107068ad4a33SUladzislau Rezki (Sony) va = kmem_cache_alloc_node(vmap_area_cachep,
1071db64fe02SNick Piggin gfp_mask & GFP_RECLAIM_MASK, node);
1072db64fe02SNick Piggin if (unlikely(!va))
1073db64fe02SNick Piggin return ERR_PTR(-ENOMEM);
1074db64fe02SNick Piggin
10757f88f88fSCatalin Marinas /*
10767f88f88fSCatalin Marinas * Only scan the relevant parts containing pointers to other objects
10777f88f88fSCatalin Marinas * to avoid false negatives.
10787f88f88fSCatalin Marinas */
10797f88f88fSCatalin Marinas kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
10807f88f88fSCatalin Marinas
1081db64fe02SNick Piggin retry:
1082*82dd23e8SUladzislau Rezki (Sony) /*
1083*82dd23e8SUladzislau Rezki (Sony) * Preload this CPU with one extra vmap_area object to ensure
1084*82dd23e8SUladzislau Rezki (Sony) * that we have it available when the fit type of the free area
1085*82dd23e8SUladzislau Rezki (Sony) * is NE_FIT_TYPE.
1086*82dd23e8SUladzislau Rezki (Sony) *
1087*82dd23e8SUladzislau Rezki (Sony) * The preload is done in non-atomic context, thus it allows us
1088*82dd23e8SUladzislau Rezki (Sony) * to use more permissive allocation masks, making us more stable
1089*82dd23e8SUladzislau Rezki (Sony) * under low memory conditions and high memory pressure.
1090*82dd23e8SUladzislau Rezki (Sony) *
1091*82dd23e8SUladzislau Rezki (Sony) * Even if it fails we do not really care about that. Just proceed
1092*82dd23e8SUladzislau Rezki (Sony) * as it is. The "overflow" path will refill the cache we allocate from.
1093*82dd23e8SUladzislau Rezki (Sony) */
1094*82dd23e8SUladzislau Rezki (Sony) preempt_disable();
1095*82dd23e8SUladzislau Rezki (Sony) if (!__this_cpu_read(ne_fit_preload_node)) {
1096*82dd23e8SUladzislau Rezki (Sony) preempt_enable();
1097*82dd23e8SUladzislau Rezki (Sony) pva = kmem_cache_alloc_node(vmap_area_cachep, GFP_KERNEL, node);
1098*82dd23e8SUladzislau Rezki (Sony) preempt_disable();
1099*82dd23e8SUladzislau Rezki (Sony)
1100*82dd23e8SUladzislau Rezki (Sony) if (__this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva)) {
1101*82dd23e8SUladzislau Rezki (Sony) if (pva)
1102*82dd23e8SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, pva);
1103*82dd23e8SUladzislau Rezki (Sony) }
1104*82dd23e8SUladzislau Rezki (Sony) }
1105*82dd23e8SUladzislau Rezki (Sony)
1106db64fe02SNick Piggin spin_lock(&vmap_area_lock);
1107*82dd23e8SUladzislau Rezki (Sony) preempt_enable();
110868ad4a33SUladzislau Rezki (Sony)
110989699605SNick Piggin /*
111068ad4a33SUladzislau Rezki (Sony) * If an allocation fails, the "vend" address is
111168ad4a33SUladzislau Rezki (Sony) * returned. Therefore trigger the overflow path.
111289699605SNick Piggin */ 1113cacca6baSUladzislau Rezki (Sony) addr = __alloc_vmap_area(size, align, vstart, vend); 111468ad4a33SUladzislau Rezki (Sony) if (unlikely(addr == vend)) 111589699605SNick Piggin goto overflow; 111689699605SNick Piggin 111789699605SNick Piggin va->va_start = addr; 111889699605SNick Piggin va->va_end = addr + size; 111989699605SNick Piggin va->flags = 0; 112068ad4a33SUladzislau Rezki (Sony) insert_vmap_area(va, &vmap_area_root, &vmap_area_list); 112168ad4a33SUladzislau Rezki (Sony) 112289699605SNick Piggin spin_unlock(&vmap_area_lock); 112389699605SNick Piggin 112461e16557SWang Xiaoqiang BUG_ON(!IS_ALIGNED(va->va_start, align)); 112589699605SNick Piggin BUG_ON(va->va_start < vstart); 112689699605SNick Piggin BUG_ON(va->va_end > vend); 112789699605SNick Piggin 112889699605SNick Piggin return va; 112989699605SNick Piggin 11307766970cSNick Piggin overflow: 1131db64fe02SNick Piggin spin_unlock(&vmap_area_lock); 1132db64fe02SNick Piggin if (!purged) { 1133db64fe02SNick Piggin purge_vmap_area_lazy(); 1134db64fe02SNick Piggin purged = 1; 1135db64fe02SNick Piggin goto retry; 1136db64fe02SNick Piggin } 11374da56b99SChris Wilson 11384da56b99SChris Wilson if (gfpflags_allow_blocking(gfp_mask)) { 11394da56b99SChris Wilson unsigned long freed = 0; 11404da56b99SChris Wilson blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); 11414da56b99SChris Wilson if (freed > 0) { 11424da56b99SChris Wilson purged = 0; 11434da56b99SChris Wilson goto retry; 11444da56b99SChris Wilson } 11454da56b99SChris Wilson } 11464da56b99SChris Wilson 114703497d76SFlorian Fainelli if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) 1148756a025fSJoe Perches pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n", 1149756a025fSJoe Perches size); 115068ad4a33SUladzislau Rezki (Sony) 115168ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va); 1152db64fe02SNick Piggin return ERR_PTR(-EBUSY); 1153db64fe02SNick Piggin } 1154db64fe02SNick Piggin 11554da56b99SChris Wilson int register_vmap_purge_notifier(struct notifier_block *nb) 11564da56b99SChris Wilson { 11574da56b99SChris Wilson return blocking_notifier_chain_register(&vmap_notify_list, nb); 11584da56b99SChris Wilson } 11594da56b99SChris Wilson EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); 11604da56b99SChris Wilson 11614da56b99SChris Wilson int unregister_vmap_purge_notifier(struct notifier_block *nb) 11624da56b99SChris Wilson { 11634da56b99SChris Wilson return blocking_notifier_chain_unregister(&vmap_notify_list, nb); 11644da56b99SChris Wilson } 11654da56b99SChris Wilson EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); 11664da56b99SChris Wilson 1167db64fe02SNick Piggin static void __free_vmap_area(struct vmap_area *va) 1168db64fe02SNick Piggin { 1169db64fe02SNick Piggin BUG_ON(RB_EMPTY_NODE(&va->rb_node)); 117089699605SNick Piggin 117189699605SNick Piggin /* 117268ad4a33SUladzislau Rezki (Sony) * Remove from the busy tree/list. 117389699605SNick Piggin */ 117468ad4a33SUladzislau Rezki (Sony) unlink_va(va, &vmap_area_root); 1175db64fe02SNick Piggin 1176ca23e405STejun Heo /* 117768ad4a33SUladzislau Rezki (Sony) * Merge VA with its neighbors, otherwise just add it. 
1178ca23e405STejun Heo */
117968ad4a33SUladzislau Rezki (Sony) merge_or_add_vmap_area(va,
118068ad4a33SUladzislau Rezki (Sony) &free_vmap_area_root, &free_vmap_area_list);
1181db64fe02SNick Piggin }
1182db64fe02SNick Piggin
1183db64fe02SNick Piggin /*
1184db64fe02SNick Piggin * Free a region of KVA allocated by alloc_vmap_area
1185db64fe02SNick Piggin */
1186db64fe02SNick Piggin static void free_vmap_area(struct vmap_area *va)
1187db64fe02SNick Piggin {
1188db64fe02SNick Piggin spin_lock(&vmap_area_lock);
1189db64fe02SNick Piggin __free_vmap_area(va);
1190db64fe02SNick Piggin spin_unlock(&vmap_area_lock);
1191db64fe02SNick Piggin }
1192db64fe02SNick Piggin
1193db64fe02SNick Piggin /*
1194db64fe02SNick Piggin * Clear the pagetable entries of a given vmap_area
1195db64fe02SNick Piggin */
1196db64fe02SNick Piggin static void unmap_vmap_area(struct vmap_area *va)
1197db64fe02SNick Piggin {
1198db64fe02SNick Piggin vunmap_page_range(va->va_start, va->va_end);
1199db64fe02SNick Piggin }
1200db64fe02SNick Piggin
1201db64fe02SNick Piggin /*
1202db64fe02SNick Piggin * lazy_max_pages is the maximum amount of virtual address space we gather up
1203db64fe02SNick Piggin * before attempting to purge with a TLB flush.
1204db64fe02SNick Piggin *
1205db64fe02SNick Piggin * There is a tradeoff here: a larger number will cover more kernel page tables
1206db64fe02SNick Piggin * and take slightly longer to purge, but it will linearly reduce the number of
1207db64fe02SNick Piggin * global TLB flushes that must be performed. It would seem natural to scale
1208db64fe02SNick Piggin * this number up linearly with the number of CPUs (because vmapping activity
1209db64fe02SNick Piggin * could also scale linearly with the number of CPUs), however it is likely
1210db64fe02SNick Piggin * that in practice, workloads might be constrained in other ways that mean
1211db64fe02SNick Piggin * vmap activity will not scale linearly with CPUs. Also, I want to be
1212db64fe02SNick Piggin * conservative and not introduce a big latency on huge systems, so go with
1213db64fe02SNick Piggin * a less aggressive log scale. It will still be an improvement over the old
1214db64fe02SNick Piggin * code, and it will be simple to change the scale factor if we find that it
1215db64fe02SNick Piggin * becomes a problem on bigger systems.
1216db64fe02SNick Piggin */
1217db64fe02SNick Piggin static unsigned long lazy_max_pages(void)
1218db64fe02SNick Piggin {
1219db64fe02SNick Piggin unsigned int log;
1220db64fe02SNick Piggin
1221db64fe02SNick Piggin log = fls(num_online_cpus());
1222db64fe02SNick Piggin
1223db64fe02SNick Piggin return log * (32UL * 1024 * 1024 / PAGE_SIZE);
1224db64fe02SNick Piggin }
1225db64fe02SNick Piggin
12264d36e6f8SUladzislau Rezki (Sony) static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
1227db64fe02SNick Piggin
12280574ecd1SChristoph Hellwig /*
12290574ecd1SChristoph Hellwig * Serialize vmap purging. There is no actual critical section protected
12300574ecd1SChristoph Hellwig * by this lock, but we want to avoid concurrent calls for performance
12310574ecd1SChristoph Hellwig * reasons and to make the pcpu_get_vm_areas more deterministic.
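 *
 * A sketch of the two usage patterns below: purge_vmap_area_lazy()
 * takes the mutex unconditionally and always purges, while
 * try_purge_vmap_area_lazy() only does a mutex_trylock() and backs
 * off silently when another purge is already in flight.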
12320574ecd1SChristoph Hellwig */
1233f9e09977SChristoph Hellwig static DEFINE_MUTEX(vmap_purge_lock);
12340574ecd1SChristoph Hellwig
123502b709dfSNick Piggin /* for per-CPU blocks */
123602b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void);
123702b709dfSNick Piggin
1238db64fe02SNick Piggin /*
12393ee48b6aSCliff Wickman * called before a call to iounmap() if the caller wants vm_area_structs
12403ee48b6aSCliff Wickman * immediately freed.
12413ee48b6aSCliff Wickman */
12423ee48b6aSCliff Wickman void set_iounmap_nonlazy(void)
12433ee48b6aSCliff Wickman {
12444d36e6f8SUladzislau Rezki (Sony) atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
12453ee48b6aSCliff Wickman }
12463ee48b6aSCliff Wickman
12473ee48b6aSCliff Wickman /*
1248db64fe02SNick Piggin * Purges all lazily-freed vmap areas.
1249db64fe02SNick Piggin */
12500574ecd1SChristoph Hellwig static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1251db64fe02SNick Piggin {
12524d36e6f8SUladzislau Rezki (Sony) unsigned long resched_threshold;
125380c4bd7aSChris Wilson struct llist_node *valist;
1254db64fe02SNick Piggin struct vmap_area *va;
1255cbb76676SVegard Nossum struct vmap_area *n_va;
1256db64fe02SNick Piggin
12570574ecd1SChristoph Hellwig lockdep_assert_held(&vmap_purge_lock);
125802b709dfSNick Piggin
125980c4bd7aSChris Wilson valist = llist_del_all(&vmap_purge_list);
126068571be9SUladzislau Rezki (Sony) if (unlikely(valist == NULL))
126168571be9SUladzislau Rezki (Sony) return false;
126268571be9SUladzislau Rezki (Sony)
126368571be9SUladzislau Rezki (Sony) /*
126468571be9SUladzislau Rezki (Sony) * TODO: calculate the flush range without looping.
126568571be9SUladzislau Rezki (Sony) * The list can be up to lazy_max_pages() elements.
126668571be9SUladzislau Rezki (Sony) */
126780c4bd7aSChris Wilson llist_for_each_entry(va, valist, purge_list) {
12680574ecd1SChristoph Hellwig if (va->va_start < start)
12690574ecd1SChristoph Hellwig start = va->va_start;
12700574ecd1SChristoph Hellwig if (va->va_end > end)
12710574ecd1SChristoph Hellwig end = va->va_end;
1272db64fe02SNick Piggin }
1273db64fe02SNick Piggin
12740574ecd1SChristoph Hellwig flush_tlb_kernel_range(start, end);
12754d36e6f8SUladzislau Rezki (Sony) resched_threshold = lazy_max_pages() << 1;
1276db64fe02SNick Piggin
1277db64fe02SNick Piggin spin_lock(&vmap_area_lock);
1278763b218dSJoel Fernandes llist_for_each_entry_safe(va, n_va, valist, purge_list) {
12794d36e6f8SUladzislau Rezki (Sony) unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1280763b218dSJoel Fernandes
1281db64fe02SNick Piggin __free_vmap_area(va);
12824d36e6f8SUladzislau Rezki (Sony) atomic_long_sub(nr, &vmap_lazy_nr);
128368571be9SUladzislau Rezki (Sony)
12844d36e6f8SUladzislau Rezki (Sony) if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1285763b218dSJoel Fernandes cond_resched_lock(&vmap_area_lock);
1286763b218dSJoel Fernandes }
1287db64fe02SNick Piggin spin_unlock(&vmap_area_lock);
12880574ecd1SChristoph Hellwig return true;
1289db64fe02SNick Piggin }
1290db64fe02SNick Piggin
1291db64fe02SNick Piggin /*
1292496850e5SNick Piggin * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
1293496850e5SNick Piggin * is already purging.
1294496850e5SNick Piggin */ 1295496850e5SNick Piggin static void try_purge_vmap_area_lazy(void) 1296496850e5SNick Piggin { 1297f9e09977SChristoph Hellwig if (mutex_trylock(&vmap_purge_lock)) { 12980574ecd1SChristoph Hellwig __purge_vmap_area_lazy(ULONG_MAX, 0); 1299f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock); 13000574ecd1SChristoph Hellwig } 1301496850e5SNick Piggin } 1302496850e5SNick Piggin 1303496850e5SNick Piggin /* 1304db64fe02SNick Piggin * Kick off a purge of the outstanding lazy areas. 1305db64fe02SNick Piggin */ 1306db64fe02SNick Piggin static void purge_vmap_area_lazy(void) 1307db64fe02SNick Piggin { 1308f9e09977SChristoph Hellwig mutex_lock(&vmap_purge_lock); 13090574ecd1SChristoph Hellwig purge_fragmented_blocks_allcpus(); 13100574ecd1SChristoph Hellwig __purge_vmap_area_lazy(ULONG_MAX, 0); 1311f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock); 1312db64fe02SNick Piggin } 1313db64fe02SNick Piggin 1314db64fe02SNick Piggin /* 131564141da5SJeremy Fitzhardinge * Free a vmap area, caller ensuring that the area has been unmapped 131664141da5SJeremy Fitzhardinge * and flush_cache_vunmap had been called for the correct range 131764141da5SJeremy Fitzhardinge * previously. 1318db64fe02SNick Piggin */ 131964141da5SJeremy Fitzhardinge static void free_vmap_area_noflush(struct vmap_area *va) 1320db64fe02SNick Piggin { 13214d36e6f8SUladzislau Rezki (Sony) unsigned long nr_lazy; 132280c4bd7aSChris Wilson 13234d36e6f8SUladzislau Rezki (Sony) nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >> 13244d36e6f8SUladzislau Rezki (Sony) PAGE_SHIFT, &vmap_lazy_nr); 132580c4bd7aSChris Wilson 132680c4bd7aSChris Wilson /* After this point, we may free va at any time */ 132780c4bd7aSChris Wilson llist_add(&va->purge_list, &vmap_purge_list); 132880c4bd7aSChris Wilson 132980c4bd7aSChris Wilson if (unlikely(nr_lazy > lazy_max_pages())) 1330496850e5SNick Piggin try_purge_vmap_area_lazy(); 1331db64fe02SNick Piggin } 1332db64fe02SNick Piggin 1333b29acbdcSNick Piggin /* 1334b29acbdcSNick Piggin * Free and unmap a vmap area 1335b29acbdcSNick Piggin */ 1336b29acbdcSNick Piggin static void free_unmap_vmap_area(struct vmap_area *va) 1337b29acbdcSNick Piggin { 1338b29acbdcSNick Piggin flush_cache_vunmap(va->va_start, va->va_end); 1339c8eef01eSChristoph Hellwig unmap_vmap_area(va); 134082a2e924SChintan Pandya if (debug_pagealloc_enabled()) 134182a2e924SChintan Pandya flush_tlb_kernel_range(va->va_start, va->va_end); 134282a2e924SChintan Pandya 1343c8eef01eSChristoph Hellwig free_vmap_area_noflush(va); 1344b29acbdcSNick Piggin } 1345b29acbdcSNick Piggin 1346db64fe02SNick Piggin static struct vmap_area *find_vmap_area(unsigned long addr) 1347db64fe02SNick Piggin { 1348db64fe02SNick Piggin struct vmap_area *va; 1349db64fe02SNick Piggin 1350db64fe02SNick Piggin spin_lock(&vmap_area_lock); 1351db64fe02SNick Piggin va = __find_vmap_area(addr); 1352db64fe02SNick Piggin spin_unlock(&vmap_area_lock); 1353db64fe02SNick Piggin 1354db64fe02SNick Piggin return va; 1355db64fe02SNick Piggin } 1356db64fe02SNick Piggin 1357db64fe02SNick Piggin /*** Per cpu kva allocator ***/ 1358db64fe02SNick Piggin 1359db64fe02SNick Piggin /* 1360db64fe02SNick Piggin * vmap space is limited especially on 32 bit architectures. Ensure there is 1361db64fe02SNick Piggin * room for at least 16 percpu vmap blocks per CPU. 
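 *
 * A worked sizing example (illustrative, assuming 4 KiB pages,
 * BITS_PER_LONG == 32 and NR_CPUS == 4): VMALLOC_PAGES is
 * 128 MiB / 4 KiB == 32768, so VMAP_BBMAP_BITS below becomes
 * clamp(32768 / 4 / 16, 64, 1024) == 512 pages per block, i.e. a
 * VMAP_BLOCK_SIZE of 2 MiB.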
1362db64fe02SNick Piggin */ 1363db64fe02SNick Piggin /* 1364db64fe02SNick Piggin * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able 1365db64fe02SNick Piggin * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess 1366db64fe02SNick Piggin * instead (we just need a rough idea) 1367db64fe02SNick Piggin */ 1368db64fe02SNick Piggin #if BITS_PER_LONG == 32 1369db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024) 1370db64fe02SNick Piggin #else 1371db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024*1024) 1372db64fe02SNick Piggin #endif 1373db64fe02SNick Piggin 1374db64fe02SNick Piggin #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) 1375db64fe02SNick Piggin #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ 1376db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ 1377db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) 1378db64fe02SNick Piggin #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ 1379db64fe02SNick Piggin #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ 1380f982f915SClemens Ladisch #define VMAP_BBMAP_BITS \ 1381f982f915SClemens Ladisch VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ 1382db64fe02SNick Piggin VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ 1383f982f915SClemens Ladisch VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) 1384db64fe02SNick Piggin 1385db64fe02SNick Piggin #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) 1386db64fe02SNick Piggin 1387db64fe02SNick Piggin struct vmap_block_queue { 1388db64fe02SNick Piggin spinlock_t lock; 1389db64fe02SNick Piggin struct list_head free; 1390db64fe02SNick Piggin }; 1391db64fe02SNick Piggin 1392db64fe02SNick Piggin struct vmap_block { 1393db64fe02SNick Piggin spinlock_t lock; 1394db64fe02SNick Piggin struct vmap_area *va; 1395db64fe02SNick Piggin unsigned long free, dirty; 13967d61bfe8SRoman Pen unsigned long dirty_min, dirty_max; /*< dirty range */ 1397db64fe02SNick Piggin struct list_head free_list; 1398db64fe02SNick Piggin struct rcu_head rcu_head; 139902b709dfSNick Piggin struct list_head purge; 1400db64fe02SNick Piggin }; 1401db64fe02SNick Piggin 1402db64fe02SNick Piggin /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ 1403db64fe02SNick Piggin static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); 1404db64fe02SNick Piggin 1405db64fe02SNick Piggin /* 1406db64fe02SNick Piggin * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block 1407db64fe02SNick Piggin * in the free path. Could get rid of this if we change the API to return a 1408db64fe02SNick Piggin * "cookie" from alloc, to be passed to free. But no big deal yet. 1409db64fe02SNick Piggin */ 1410db64fe02SNick Piggin static DEFINE_SPINLOCK(vmap_block_tree_lock); 1411db64fe02SNick Piggin static RADIX_TREE(vmap_block_tree, GFP_ATOMIC); 1412db64fe02SNick Piggin 1413db64fe02SNick Piggin /* 1414db64fe02SNick Piggin * We should probably have a fallback mechanism to allocate virtual memory 1415db64fe02SNick Piggin * out of partially filled vmap blocks. However vmap block sizing should be 1416db64fe02SNick Piggin * fairly reasonable according to the vmalloc size, so it shouldn't be a 1417db64fe02SNick Piggin * big problem. 
1418db64fe02SNick Piggin */ 1419db64fe02SNick Piggin 1420db64fe02SNick Piggin static unsigned long addr_to_vb_idx(unsigned long addr) 1421db64fe02SNick Piggin { 1422db64fe02SNick Piggin addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); 1423db64fe02SNick Piggin addr /= VMAP_BLOCK_SIZE; 1424db64fe02SNick Piggin return addr; 1425db64fe02SNick Piggin } 1426db64fe02SNick Piggin 1427cf725ce2SRoman Pen static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off) 1428cf725ce2SRoman Pen { 1429cf725ce2SRoman Pen unsigned long addr; 1430cf725ce2SRoman Pen 1431cf725ce2SRoman Pen addr = va_start + (pages_off << PAGE_SHIFT); 1432cf725ce2SRoman Pen BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); 1433cf725ce2SRoman Pen return (void *)addr; 1434cf725ce2SRoman Pen } 1435cf725ce2SRoman Pen 1436cf725ce2SRoman Pen /** 1437cf725ce2SRoman Pen * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this 1438cf725ce2SRoman Pen * block. Of course pages number can't exceed VMAP_BBMAP_BITS 1439cf725ce2SRoman Pen * @order: how many 2^order pages should be occupied in newly allocated block 1440cf725ce2SRoman Pen * @gfp_mask: flags for the page level allocator 1441cf725ce2SRoman Pen * 1442a862f68aSMike Rapoport * Return: virtual address in a newly allocated block or ERR_PTR(-errno) 1443cf725ce2SRoman Pen */ 1444cf725ce2SRoman Pen static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) 1445db64fe02SNick Piggin { 1446db64fe02SNick Piggin struct vmap_block_queue *vbq; 1447db64fe02SNick Piggin struct vmap_block *vb; 1448db64fe02SNick Piggin struct vmap_area *va; 1449db64fe02SNick Piggin unsigned long vb_idx; 1450db64fe02SNick Piggin int node, err; 1451cf725ce2SRoman Pen void *vaddr; 1452db64fe02SNick Piggin 1453db64fe02SNick Piggin node = numa_node_id(); 1454db64fe02SNick Piggin 1455db64fe02SNick Piggin vb = kmalloc_node(sizeof(struct vmap_block), 1456db64fe02SNick Piggin gfp_mask & GFP_RECLAIM_MASK, node); 1457db64fe02SNick Piggin if (unlikely(!vb)) 1458db64fe02SNick Piggin return ERR_PTR(-ENOMEM); 1459db64fe02SNick Piggin 1460db64fe02SNick Piggin va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, 1461db64fe02SNick Piggin VMALLOC_START, VMALLOC_END, 1462db64fe02SNick Piggin node, gfp_mask); 1463ddf9c6d4STobias Klauser if (IS_ERR(va)) { 1464db64fe02SNick Piggin kfree(vb); 1465e7d86340SJulia Lawall return ERR_CAST(va); 1466db64fe02SNick Piggin } 1467db64fe02SNick Piggin 1468db64fe02SNick Piggin err = radix_tree_preload(gfp_mask); 1469db64fe02SNick Piggin if (unlikely(err)) { 1470db64fe02SNick Piggin kfree(vb); 1471db64fe02SNick Piggin free_vmap_area(va); 1472db64fe02SNick Piggin return ERR_PTR(err); 1473db64fe02SNick Piggin } 1474db64fe02SNick Piggin 1475cf725ce2SRoman Pen vaddr = vmap_block_vaddr(va->va_start, 0); 1476db64fe02SNick Piggin spin_lock_init(&vb->lock); 1477db64fe02SNick Piggin vb->va = va; 1478cf725ce2SRoman Pen /* At least something should be left free */ 1479cf725ce2SRoman Pen BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); 1480cf725ce2SRoman Pen vb->free = VMAP_BBMAP_BITS - (1UL << order); 1481db64fe02SNick Piggin vb->dirty = 0; 14827d61bfe8SRoman Pen vb->dirty_min = VMAP_BBMAP_BITS; 14837d61bfe8SRoman Pen vb->dirty_max = 0; 1484db64fe02SNick Piggin INIT_LIST_HEAD(&vb->free_list); 1485db64fe02SNick Piggin 1486db64fe02SNick Piggin vb_idx = addr_to_vb_idx(va->va_start); 1487db64fe02SNick Piggin spin_lock(&vmap_block_tree_lock); 1488db64fe02SNick Piggin err = radix_tree_insert(&vmap_block_tree, vb_idx, vb); 1489db64fe02SNick Piggin 
spin_unlock(&vmap_block_tree_lock); 1490db64fe02SNick Piggin BUG_ON(err); 1491db64fe02SNick Piggin radix_tree_preload_end(); 1492db64fe02SNick Piggin 1493db64fe02SNick Piggin vbq = &get_cpu_var(vmap_block_queue); 1494db64fe02SNick Piggin spin_lock(&vbq->lock); 149568ac546fSRoman Pen list_add_tail_rcu(&vb->free_list, &vbq->free); 1496db64fe02SNick Piggin spin_unlock(&vbq->lock); 14973f04ba85STejun Heo put_cpu_var(vmap_block_queue); 1498db64fe02SNick Piggin 1499cf725ce2SRoman Pen return vaddr; 1500db64fe02SNick Piggin } 1501db64fe02SNick Piggin 1502db64fe02SNick Piggin static void free_vmap_block(struct vmap_block *vb) 1503db64fe02SNick Piggin { 1504db64fe02SNick Piggin struct vmap_block *tmp; 1505db64fe02SNick Piggin unsigned long vb_idx; 1506db64fe02SNick Piggin 1507db64fe02SNick Piggin vb_idx = addr_to_vb_idx(vb->va->va_start); 1508db64fe02SNick Piggin spin_lock(&vmap_block_tree_lock); 1509db64fe02SNick Piggin tmp = radix_tree_delete(&vmap_block_tree, vb_idx); 1510db64fe02SNick Piggin spin_unlock(&vmap_block_tree_lock); 1511db64fe02SNick Piggin BUG_ON(tmp != vb); 1512db64fe02SNick Piggin 151364141da5SJeremy Fitzhardinge free_vmap_area_noflush(vb->va); 151422a3c7d1SLai Jiangshan kfree_rcu(vb, rcu_head); 1515db64fe02SNick Piggin } 1516db64fe02SNick Piggin 151702b709dfSNick Piggin static void purge_fragmented_blocks(int cpu) 151802b709dfSNick Piggin { 151902b709dfSNick Piggin LIST_HEAD(purge); 152002b709dfSNick Piggin struct vmap_block *vb; 152102b709dfSNick Piggin struct vmap_block *n_vb; 152202b709dfSNick Piggin struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 152302b709dfSNick Piggin 152402b709dfSNick Piggin rcu_read_lock(); 152502b709dfSNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) { 152602b709dfSNick Piggin 152702b709dfSNick Piggin if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS)) 152802b709dfSNick Piggin continue; 152902b709dfSNick Piggin 153002b709dfSNick Piggin spin_lock(&vb->lock); 153102b709dfSNick Piggin if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) { 153202b709dfSNick Piggin vb->free = 0; /* prevent further allocs after releasing lock */ 153302b709dfSNick Piggin vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */ 15347d61bfe8SRoman Pen vb->dirty_min = 0; 15357d61bfe8SRoman Pen vb->dirty_max = VMAP_BBMAP_BITS; 153602b709dfSNick Piggin spin_lock(&vbq->lock); 153702b709dfSNick Piggin list_del_rcu(&vb->free_list); 153802b709dfSNick Piggin spin_unlock(&vbq->lock); 153902b709dfSNick Piggin spin_unlock(&vb->lock); 154002b709dfSNick Piggin list_add_tail(&vb->purge, &purge); 154102b709dfSNick Piggin } else 154202b709dfSNick Piggin spin_unlock(&vb->lock); 154302b709dfSNick Piggin } 154402b709dfSNick Piggin rcu_read_unlock(); 154502b709dfSNick Piggin 154602b709dfSNick Piggin list_for_each_entry_safe(vb, n_vb, &purge, purge) { 154702b709dfSNick Piggin list_del(&vb->purge); 154802b709dfSNick Piggin free_vmap_block(vb); 154902b709dfSNick Piggin } 155002b709dfSNick Piggin } 155102b709dfSNick Piggin 155202b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void) 155302b709dfSNick Piggin { 155402b709dfSNick Piggin int cpu; 155502b709dfSNick Piggin 155602b709dfSNick Piggin for_each_possible_cpu(cpu) 155702b709dfSNick Piggin purge_fragmented_blocks(cpu); 155802b709dfSNick Piggin } 155902b709dfSNick Piggin 1560db64fe02SNick Piggin static void *vb_alloc(unsigned long size, gfp_t gfp_mask) 1561db64fe02SNick Piggin { 1562db64fe02SNick Piggin struct vmap_block_queue *vbq; 1563db64fe02SNick 
Piggin struct vmap_block *vb; 1564cf725ce2SRoman Pen void *vaddr = NULL; 1565db64fe02SNick Piggin unsigned int order; 1566db64fe02SNick Piggin 1567891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 1568db64fe02SNick Piggin BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 1569aa91c4d8SJan Kara if (WARN_ON(size == 0)) { 1570aa91c4d8SJan Kara /* 1571aa91c4d8SJan Kara * Allocating 0 bytes isn't what caller wants since 1572aa91c4d8SJan Kara * get_order(0) returns funny result. Just warn and terminate 1573aa91c4d8SJan Kara * early. 1574aa91c4d8SJan Kara */ 1575aa91c4d8SJan Kara return NULL; 1576aa91c4d8SJan Kara } 1577db64fe02SNick Piggin order = get_order(size); 1578db64fe02SNick Piggin 1579db64fe02SNick Piggin rcu_read_lock(); 1580db64fe02SNick Piggin vbq = &get_cpu_var(vmap_block_queue); 1581db64fe02SNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) { 1582cf725ce2SRoman Pen unsigned long pages_off; 1583db64fe02SNick Piggin 1584db64fe02SNick Piggin spin_lock(&vb->lock); 1585cf725ce2SRoman Pen if (vb->free < (1UL << order)) { 1586cf725ce2SRoman Pen spin_unlock(&vb->lock); 1587cf725ce2SRoman Pen continue; 1588cf725ce2SRoman Pen } 158902b709dfSNick Piggin 1590cf725ce2SRoman Pen pages_off = VMAP_BBMAP_BITS - vb->free; 1591cf725ce2SRoman Pen vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); 1592db64fe02SNick Piggin vb->free -= 1UL << order; 1593db64fe02SNick Piggin if (vb->free == 0) { 1594db64fe02SNick Piggin spin_lock(&vbq->lock); 1595de560423SNick Piggin list_del_rcu(&vb->free_list); 1596db64fe02SNick Piggin spin_unlock(&vbq->lock); 1597db64fe02SNick Piggin } 1598cf725ce2SRoman Pen 1599db64fe02SNick Piggin spin_unlock(&vb->lock); 1600db64fe02SNick Piggin break; 1601db64fe02SNick Piggin } 160202b709dfSNick Piggin 16033f04ba85STejun Heo put_cpu_var(vmap_block_queue); 1604db64fe02SNick Piggin rcu_read_unlock(); 1605db64fe02SNick Piggin 1606cf725ce2SRoman Pen /* Allocate new block if nothing was found */ 1607cf725ce2SRoman Pen if (!vaddr) 1608cf725ce2SRoman Pen vaddr = new_vmap_block(order, gfp_mask); 1609db64fe02SNick Piggin 1610cf725ce2SRoman Pen return vaddr; 1611db64fe02SNick Piggin } 1612db64fe02SNick Piggin 1613db64fe02SNick Piggin static void vb_free(const void *addr, unsigned long size) 1614db64fe02SNick Piggin { 1615db64fe02SNick Piggin unsigned long offset; 1616db64fe02SNick Piggin unsigned long vb_idx; 1617db64fe02SNick Piggin unsigned int order; 1618db64fe02SNick Piggin struct vmap_block *vb; 1619db64fe02SNick Piggin 1620891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 1621db64fe02SNick Piggin BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 1622b29acbdcSNick Piggin 1623b29acbdcSNick Piggin flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size); 1624b29acbdcSNick Piggin 1625db64fe02SNick Piggin order = get_order(size); 1626db64fe02SNick Piggin 1627db64fe02SNick Piggin offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1); 16287d61bfe8SRoman Pen offset >>= PAGE_SHIFT; 1629db64fe02SNick Piggin 1630db64fe02SNick Piggin vb_idx = addr_to_vb_idx((unsigned long)addr); 1631db64fe02SNick Piggin rcu_read_lock(); 1632db64fe02SNick Piggin vb = radix_tree_lookup(&vmap_block_tree, vb_idx); 1633db64fe02SNick Piggin rcu_read_unlock(); 1634db64fe02SNick Piggin BUG_ON(!vb); 1635db64fe02SNick Piggin 163664141da5SJeremy Fitzhardinge vunmap_page_range((unsigned long)addr, (unsigned long)addr + size); 163764141da5SJeremy Fitzhardinge 163882a2e924SChintan Pandya if (debug_pagealloc_enabled()) 163982a2e924SChintan Pandya flush_tlb_kernel_range((unsigned long)addr, 
164082a2e924SChintan Pandya (unsigned long)addr + size); 164182a2e924SChintan Pandya 1642db64fe02SNick Piggin spin_lock(&vb->lock); 16437d61bfe8SRoman Pen 16447d61bfe8SRoman Pen /* Expand dirty range */ 16457d61bfe8SRoman Pen vb->dirty_min = min(vb->dirty_min, offset); 16467d61bfe8SRoman Pen vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); 1647d086817dSMinChan Kim 1648db64fe02SNick Piggin vb->dirty += 1UL << order; 1649db64fe02SNick Piggin if (vb->dirty == VMAP_BBMAP_BITS) { 1650de560423SNick Piggin BUG_ON(vb->free); 1651db64fe02SNick Piggin spin_unlock(&vb->lock); 1652db64fe02SNick Piggin free_vmap_block(vb); 1653db64fe02SNick Piggin } else 1654db64fe02SNick Piggin spin_unlock(&vb->lock); 1655db64fe02SNick Piggin } 1656db64fe02SNick Piggin 1657868b104dSRick Edgecombe static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) 1658db64fe02SNick Piggin { 1659db64fe02SNick Piggin int cpu; 1660db64fe02SNick Piggin 16619b463334SJeremy Fitzhardinge if (unlikely(!vmap_initialized)) 16629b463334SJeremy Fitzhardinge return; 16639b463334SJeremy Fitzhardinge 16645803ed29SChristoph Hellwig might_sleep(); 16655803ed29SChristoph Hellwig 1666db64fe02SNick Piggin for_each_possible_cpu(cpu) { 1667db64fe02SNick Piggin struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 1668db64fe02SNick Piggin struct vmap_block *vb; 1669db64fe02SNick Piggin 1670db64fe02SNick Piggin rcu_read_lock(); 1671db64fe02SNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) { 1672db64fe02SNick Piggin spin_lock(&vb->lock); 16737d61bfe8SRoman Pen if (vb->dirty) { 16747d61bfe8SRoman Pen unsigned long va_start = vb->va->va_start; 1675db64fe02SNick Piggin unsigned long s, e; 1676b136be5eSJoonsoo Kim 16777d61bfe8SRoman Pen s = va_start + (vb->dirty_min << PAGE_SHIFT); 16787d61bfe8SRoman Pen e = va_start + (vb->dirty_max << PAGE_SHIFT); 1679db64fe02SNick Piggin 16807d61bfe8SRoman Pen start = min(s, start); 16817d61bfe8SRoman Pen end = max(e, end); 16827d61bfe8SRoman Pen 1683db64fe02SNick Piggin flush = 1; 1684db64fe02SNick Piggin } 1685db64fe02SNick Piggin spin_unlock(&vb->lock); 1686db64fe02SNick Piggin } 1687db64fe02SNick Piggin rcu_read_unlock(); 1688db64fe02SNick Piggin } 1689db64fe02SNick Piggin 1690f9e09977SChristoph Hellwig mutex_lock(&vmap_purge_lock); 16910574ecd1SChristoph Hellwig purge_fragmented_blocks_allcpus(); 16920574ecd1SChristoph Hellwig if (!__purge_vmap_area_lazy(start, end) && flush) 16930574ecd1SChristoph Hellwig flush_tlb_kernel_range(start, end); 1694f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock); 1695db64fe02SNick Piggin } 1696868b104dSRick Edgecombe 1697868b104dSRick Edgecombe /** 1698868b104dSRick Edgecombe * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer 1699868b104dSRick Edgecombe * 1700868b104dSRick Edgecombe * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily 1701868b104dSRick Edgecombe * to amortize TLB flushing overheads. What this means is that any page you 1702868b104dSRick Edgecombe * have now, may, in a former life, have been mapped into kernel virtual 1703868b104dSRick Edgecombe * address by the vmap layer and so there might be some CPUs with TLB entries 1704868b104dSRick Edgecombe * still referencing that page (additional to the regular 1:1 kernel mapping). 1705868b104dSRick Edgecombe * 1706868b104dSRick Edgecombe * vm_unmap_aliases flushes all such lazy mappings. 
After it returns, we can
1707868b104dSRick Edgecombe * be sure that none of the pages we have control over will have any aliases
1708868b104dSRick Edgecombe * from the vmap layer.
1709868b104dSRick Edgecombe */
1710868b104dSRick Edgecombe void vm_unmap_aliases(void)
1711868b104dSRick Edgecombe {
1712868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0;
1713868b104dSRick Edgecombe int flush = 0;
1714868b104dSRick Edgecombe
1715868b104dSRick Edgecombe _vm_unmap_aliases(start, end, flush);
1716868b104dSRick Edgecombe }
1717db64fe02SNick Piggin EXPORT_SYMBOL_GPL(vm_unmap_aliases);
1718db64fe02SNick Piggin
1719db64fe02SNick Piggin /**
1720db64fe02SNick Piggin * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
1721db64fe02SNick Piggin * @mem: the pointer returned by vm_map_ram
1722db64fe02SNick Piggin * @count: the count passed to that vm_map_ram call (cannot unmap partial)
1723db64fe02SNick Piggin */
1724db64fe02SNick Piggin void vm_unmap_ram(const void *mem, unsigned int count)
1725db64fe02SNick Piggin {
172665ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT;
1727db64fe02SNick Piggin unsigned long addr = (unsigned long)mem;
17289c3acf60SChristoph Hellwig struct vmap_area *va;
1729db64fe02SNick Piggin
17305803ed29SChristoph Hellwig might_sleep();
1731db64fe02SNick Piggin BUG_ON(!addr);
1732db64fe02SNick Piggin BUG_ON(addr < VMALLOC_START);
1733db64fe02SNick Piggin BUG_ON(addr > VMALLOC_END);
1734a1c0b1a0SShawn Lin BUG_ON(!PAGE_ALIGNED(addr));
1735db64fe02SNick Piggin
17369c3acf60SChristoph Hellwig if (likely(count <= VMAP_MAX_ALLOC)) {
173705e3ff95SChintan Pandya debug_check_no_locks_freed(mem, size);
1738db64fe02SNick Piggin vb_free(mem, size);
17399c3acf60SChristoph Hellwig return;
17409c3acf60SChristoph Hellwig }
17419c3acf60SChristoph Hellwig
17429c3acf60SChristoph Hellwig va = find_vmap_area(addr);
17439c3acf60SChristoph Hellwig BUG_ON(!va);
174405e3ff95SChintan Pandya debug_check_no_locks_freed((void *)va->va_start,
174505e3ff95SChintan Pandya (va->va_end - va->va_start));
17469c3acf60SChristoph Hellwig free_unmap_vmap_area(va);
1747db64fe02SNick Piggin }
1748db64fe02SNick Piggin EXPORT_SYMBOL(vm_unmap_ram);
1749db64fe02SNick Piggin
1750db64fe02SNick Piggin /**
1751db64fe02SNick Piggin * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
1752db64fe02SNick Piggin * @pages: an array of pointers to the pages to be mapped
1753db64fe02SNick Piggin * @count: number of pages
1754db64fe02SNick Piggin * @node: prefer to allocate data structures on this node
1755db64fe02SNick Piggin * @prot: memory protection to use. PAGE_KERNEL for regular RAM
1756e99c97adSRandy Dunlap *
175736437638SGioh Kim * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be
175836437638SGioh Kim * faster than vmap(). But if you mix long-lived and short-lived objects
175936437638SGioh Kim * with vm_map_ram(), the address space can become badly fragmented
176036437638SGioh Kim * (especially on a 32-bit machine) and allocations may eventually fail.
176136437638SGioh Kim * Use this function for short-lived objects only.
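 *
 * A usage sketch (illustrative; "pages" and "nr" stand for a
 * caller-provided page array and its length, error handling elided):
 *
 *	void *mem = vm_map_ram(pages, nr, NUMA_NO_NODE, PAGE_KERNEL);
 *	if (mem) {
 *		memset(mem, 0, (unsigned long)nr << PAGE_SHIFT);
 *		vm_unmap_ram(mem, nr);
 *	}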
176236437638SGioh Kim * 1763e99c97adSRandy Dunlap * Returns: a pointer to the address that has been mapped, or %NULL on failure 1764db64fe02SNick Piggin */ 1765db64fe02SNick Piggin void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot) 1766db64fe02SNick Piggin { 176765ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT; 1768db64fe02SNick Piggin unsigned long addr; 1769db64fe02SNick Piggin void *mem; 1770db64fe02SNick Piggin 1771db64fe02SNick Piggin if (likely(count <= VMAP_MAX_ALLOC)) { 1772db64fe02SNick Piggin mem = vb_alloc(size, GFP_KERNEL); 1773db64fe02SNick Piggin if (IS_ERR(mem)) 1774db64fe02SNick Piggin return NULL; 1775db64fe02SNick Piggin addr = (unsigned long)mem; 1776db64fe02SNick Piggin } else { 1777db64fe02SNick Piggin struct vmap_area *va; 1778db64fe02SNick Piggin va = alloc_vmap_area(size, PAGE_SIZE, 1779db64fe02SNick Piggin VMALLOC_START, VMALLOC_END, node, GFP_KERNEL); 1780db64fe02SNick Piggin if (IS_ERR(va)) 1781db64fe02SNick Piggin return NULL; 1782db64fe02SNick Piggin 1783db64fe02SNick Piggin addr = va->va_start; 1784db64fe02SNick Piggin mem = (void *)addr; 1785db64fe02SNick Piggin } 1786db64fe02SNick Piggin if (vmap_page_range(addr, addr + size, prot, pages) < 0) { 1787db64fe02SNick Piggin vm_unmap_ram(mem, count); 1788db64fe02SNick Piggin return NULL; 1789db64fe02SNick Piggin } 1790db64fe02SNick Piggin return mem; 1791db64fe02SNick Piggin } 1792db64fe02SNick Piggin EXPORT_SYMBOL(vm_map_ram); 1793db64fe02SNick Piggin 17944341fa45SJoonsoo Kim static struct vm_struct *vmlist __initdata; 179592eac168SMike Rapoport 1796f0aa6617STejun Heo /** 1797be9b7335SNicolas Pitre * vm_area_add_early - add vmap area early during boot 1798be9b7335SNicolas Pitre * @vm: vm_struct to add 1799be9b7335SNicolas Pitre * 1800be9b7335SNicolas Pitre * This function is used to add fixed kernel vm area to vmlist before 1801be9b7335SNicolas Pitre * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags 1802be9b7335SNicolas Pitre * should contain proper values and the other fields should be zero. 1803be9b7335SNicolas Pitre * 1804be9b7335SNicolas Pitre * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 1805be9b7335SNicolas Pitre */ 1806be9b7335SNicolas Pitre void __init vm_area_add_early(struct vm_struct *vm) 1807be9b7335SNicolas Pitre { 1808be9b7335SNicolas Pitre struct vm_struct *tmp, **p; 1809be9b7335SNicolas Pitre 1810be9b7335SNicolas Pitre BUG_ON(vmap_initialized); 1811be9b7335SNicolas Pitre for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { 1812be9b7335SNicolas Pitre if (tmp->addr >= vm->addr) { 1813be9b7335SNicolas Pitre BUG_ON(tmp->addr < vm->addr + vm->size); 1814be9b7335SNicolas Pitre break; 1815be9b7335SNicolas Pitre } else 1816be9b7335SNicolas Pitre BUG_ON(tmp->addr + tmp->size > vm->addr); 1817be9b7335SNicolas Pitre } 1818be9b7335SNicolas Pitre vm->next = *p; 1819be9b7335SNicolas Pitre *p = vm; 1820be9b7335SNicolas Pitre } 1821be9b7335SNicolas Pitre 1822be9b7335SNicolas Pitre /** 1823f0aa6617STejun Heo * vm_area_register_early - register vmap area early during boot 1824f0aa6617STejun Heo * @vm: vm_struct to register 1825c0c0a293STejun Heo * @align: requested alignment 1826f0aa6617STejun Heo * 1827f0aa6617STejun Heo * This function is used to register kernel vm area before 1828f0aa6617STejun Heo * vmalloc_init() is called. @vm->size and @vm->flags should contain 1829f0aa6617STejun Heo * proper values on entry and other fields should be zero. 
On return, 1830f0aa6617STejun Heo * vm->addr contains the allocated address. 1831f0aa6617STejun Heo * 1832f0aa6617STejun Heo * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 1833f0aa6617STejun Heo */ 1834c0c0a293STejun Heo void __init vm_area_register_early(struct vm_struct *vm, size_t align) 1835f0aa6617STejun Heo { 1836f0aa6617STejun Heo static size_t vm_init_off __initdata; 1837c0c0a293STejun Heo unsigned long addr; 1838f0aa6617STejun Heo 1839c0c0a293STejun Heo addr = ALIGN(VMALLOC_START + vm_init_off, align); 1840c0c0a293STejun Heo vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; 1841c0c0a293STejun Heo 1842c0c0a293STejun Heo vm->addr = (void *)addr; 1843f0aa6617STejun Heo 1844be9b7335SNicolas Pitre vm_area_add_early(vm); 1845f0aa6617STejun Heo } 1846f0aa6617STejun Heo 184768ad4a33SUladzislau Rezki (Sony) static void vmap_init_free_space(void) 184868ad4a33SUladzislau Rezki (Sony) { 184968ad4a33SUladzislau Rezki (Sony) unsigned long vmap_start = 1; 185068ad4a33SUladzislau Rezki (Sony) const unsigned long vmap_end = ULONG_MAX; 185168ad4a33SUladzislau Rezki (Sony) struct vmap_area *busy, *free; 185268ad4a33SUladzislau Rezki (Sony) 185368ad4a33SUladzislau Rezki (Sony) /* 185468ad4a33SUladzislau Rezki (Sony) * B F B B B F 185568ad4a33SUladzislau Rezki (Sony) * -|-----|.....|-----|-----|-----|.....|- 185668ad4a33SUladzislau Rezki (Sony) * | The KVA space | 185768ad4a33SUladzislau Rezki (Sony) * |<--------------------------------->| 185868ad4a33SUladzislau Rezki (Sony) */ 185968ad4a33SUladzislau Rezki (Sony) list_for_each_entry(busy, &vmap_area_list, list) { 186068ad4a33SUladzislau Rezki (Sony) if (busy->va_start - vmap_start > 0) { 186168ad4a33SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 186268ad4a33SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) { 186368ad4a33SUladzislau Rezki (Sony) free->va_start = vmap_start; 186468ad4a33SUladzislau Rezki (Sony) free->va_end = busy->va_start; 186568ad4a33SUladzislau Rezki (Sony) 186668ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL, 186768ad4a33SUladzislau Rezki (Sony) &free_vmap_area_root, 186868ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list); 186968ad4a33SUladzislau Rezki (Sony) } 187068ad4a33SUladzislau Rezki (Sony) } 187168ad4a33SUladzislau Rezki (Sony) 187268ad4a33SUladzislau Rezki (Sony) vmap_start = busy->va_end; 187368ad4a33SUladzislau Rezki (Sony) } 187468ad4a33SUladzislau Rezki (Sony) 187568ad4a33SUladzislau Rezki (Sony) if (vmap_end - vmap_start > 0) { 187668ad4a33SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 187768ad4a33SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) { 187868ad4a33SUladzislau Rezki (Sony) free->va_start = vmap_start; 187968ad4a33SUladzislau Rezki (Sony) free->va_end = vmap_end; 188068ad4a33SUladzislau Rezki (Sony) 188168ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL, 188268ad4a33SUladzislau Rezki (Sony) &free_vmap_area_root, 188368ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list); 188468ad4a33SUladzislau Rezki (Sony) } 188568ad4a33SUladzislau Rezki (Sony) } 188668ad4a33SUladzislau Rezki (Sony) } 188768ad4a33SUladzislau Rezki (Sony) 1888db64fe02SNick Piggin void __init vmalloc_init(void) 1889db64fe02SNick Piggin { 1890822c18f2SIvan Kokshaysky struct vmap_area *va; 1891822c18f2SIvan Kokshaysky struct vm_struct *tmp; 1892db64fe02SNick Piggin int i; 1893db64fe02SNick Piggin 189468ad4a33SUladzislau Rezki (Sony) /* 189568ad4a33SUladzislau Rezki (Sony) * Create the cache for vmap_area 
objects. 189668ad4a33SUladzislau Rezki (Sony) */ 189768ad4a33SUladzislau Rezki (Sony) vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC); 189868ad4a33SUladzislau Rezki (Sony) 1899db64fe02SNick Piggin for_each_possible_cpu(i) { 1900db64fe02SNick Piggin struct vmap_block_queue *vbq; 190132fcfd40SAl Viro struct vfree_deferred *p; 1902db64fe02SNick Piggin 1903db64fe02SNick Piggin vbq = &per_cpu(vmap_block_queue, i); 1904db64fe02SNick Piggin spin_lock_init(&vbq->lock); 1905db64fe02SNick Piggin INIT_LIST_HEAD(&vbq->free); 190632fcfd40SAl Viro p = &per_cpu(vfree_deferred, i); 190732fcfd40SAl Viro init_llist_head(&p->list); 190832fcfd40SAl Viro INIT_WORK(&p->wq, free_work); 1909db64fe02SNick Piggin } 19109b463334SJeremy Fitzhardinge 1911822c18f2SIvan Kokshaysky /* Import existing vmlist entries. */ 1912822c18f2SIvan Kokshaysky for (tmp = vmlist; tmp; tmp = tmp->next) { 191368ad4a33SUladzislau Rezki (Sony) va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 191468ad4a33SUladzislau Rezki (Sony) if (WARN_ON_ONCE(!va)) 191568ad4a33SUladzislau Rezki (Sony) continue; 191668ad4a33SUladzislau Rezki (Sony) 1917dbda591dSKyongHo va->flags = VM_VM_AREA; 1918822c18f2SIvan Kokshaysky va->va_start = (unsigned long)tmp->addr; 1919822c18f2SIvan Kokshaysky va->va_end = va->va_start + tmp->size; 1920dbda591dSKyongHo va->vm = tmp; 192168ad4a33SUladzislau Rezki (Sony) insert_vmap_area(va, &vmap_area_root, &vmap_area_list); 1922822c18f2SIvan Kokshaysky } 1923ca23e405STejun Heo 192468ad4a33SUladzislau Rezki (Sony) /* 192568ad4a33SUladzislau Rezki (Sony) * Now we can initialize a free vmap space. 192668ad4a33SUladzislau Rezki (Sony) */ 192768ad4a33SUladzislau Rezki (Sony) vmap_init_free_space(); 19289b463334SJeremy Fitzhardinge vmap_initialized = true; 1929db64fe02SNick Piggin } 1930db64fe02SNick Piggin 19318fc48985STejun Heo /** 19328fc48985STejun Heo * map_kernel_range_noflush - map kernel VM area with the specified pages 19338fc48985STejun Heo * @addr: start of the VM area to map 19348fc48985STejun Heo * @size: size of the VM area to map 19358fc48985STejun Heo * @prot: page protection flags to use 19368fc48985STejun Heo * @pages: pages to map 19378fc48985STejun Heo * 19388fc48985STejun Heo * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size 19398fc48985STejun Heo * specify should have been allocated using get_vm_area() and its 19408fc48985STejun Heo * friends. 19418fc48985STejun Heo * 19428fc48985STejun Heo * NOTE: 19438fc48985STejun Heo * This function does NOT do any cache flushing. The caller is 19448fc48985STejun Heo * responsible for calling flush_cache_vmap() on to-be-mapped areas 19458fc48985STejun Heo * before calling this function. 19468fc48985STejun Heo * 19478fc48985STejun Heo * RETURNS: 19488fc48985STejun Heo * The number of pages mapped on success, -errno on failure. 19498fc48985STejun Heo */ 19508fc48985STejun Heo int map_kernel_range_noflush(unsigned long addr, unsigned long size, 19518fc48985STejun Heo pgprot_t prot, struct page **pages) 19528fc48985STejun Heo { 19538fc48985STejun Heo return vmap_page_range_noflush(addr, addr + size, prot, pages); 19548fc48985STejun Heo } 19558fc48985STejun Heo 19568fc48985STejun Heo /** 19578fc48985STejun Heo * unmap_kernel_range_noflush - unmap kernel VM area 19588fc48985STejun Heo * @addr: start of the VM area to unmap 19598fc48985STejun Heo * @size: size of the VM area to unmap 19608fc48985STejun Heo * 19618fc48985STejun Heo * Unmap PFN_UP(@size) pages at @addr. 
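 * (For instance, with 4 KiB pages a @size of 5000 bytes covers
 * PFN_UP(5000) == 2 pages.)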
The VM area @addr and @size
19628fc48985STejun Heo * specify should have been allocated using get_vm_area() and its
19638fc48985STejun Heo * friends.
19648fc48985STejun Heo *
19658fc48985STejun Heo * NOTE:
19668fc48985STejun Heo * This function does NOT do any cache flushing. The caller is
19678fc48985STejun Heo * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
19688fc48985STejun Heo * before calling this function and flush_tlb_kernel_range() after.
19698fc48985STejun Heo */
19708fc48985STejun Heo void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
19718fc48985STejun Heo {
19728fc48985STejun Heo vunmap_page_range(addr, addr + size);
19738fc48985STejun Heo }
197481e88fdcSHuang Ying EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
19758fc48985STejun Heo
19768fc48985STejun Heo /**
19778fc48985STejun Heo * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
19788fc48985STejun Heo * @addr: start of the VM area to unmap
19798fc48985STejun Heo * @size: size of the VM area to unmap
19808fc48985STejun Heo *
19818fc48985STejun Heo * Similar to unmap_kernel_range_noflush() but flushes the vcache before
19828fc48985STejun Heo * the unmapping and the TLB after.
19838fc48985STejun Heo */
1984db64fe02SNick Piggin void unmap_kernel_range(unsigned long addr, unsigned long size)
1985db64fe02SNick Piggin {
1986db64fe02SNick Piggin unsigned long end = addr + size;
1987f6fcba70STejun Heo
1988f6fcba70STejun Heo flush_cache_vunmap(addr, end);
1989db64fe02SNick Piggin vunmap_page_range(addr, end);
1990db64fe02SNick Piggin flush_tlb_kernel_range(addr, end);
1991db64fe02SNick Piggin }
199293ef6d6cSMinchan Kim EXPORT_SYMBOL_GPL(unmap_kernel_range);
1993db64fe02SNick Piggin
1994f6f8ed47SWANG Chao int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
1995db64fe02SNick Piggin {
1996db64fe02SNick Piggin unsigned long addr = (unsigned long)area->addr;
1997762216abSWanpeng Li unsigned long end = addr + get_vm_area_size(area);
1998db64fe02SNick Piggin int err;
1999db64fe02SNick Piggin
2000f6f8ed47SWANG Chao err = vmap_page_range(addr, end, prot, pages);
2001db64fe02SNick Piggin
2002f6f8ed47SWANG Chao return err > 0 ? 0 : err;
2003db64fe02SNick Piggin }
2004db64fe02SNick Piggin EXPORT_SYMBOL_GPL(map_vm_area);
2005db64fe02SNick Piggin
2006f5252e00SMitsuo Hayasaka static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
20075e6cafc8SMarek Szyprowski unsigned long flags, const void *caller)
2008cf88c790STejun Heo {
2009c69480adSJoonsoo Kim spin_lock(&vmap_area_lock);
2010cf88c790STejun Heo vm->flags = flags;
2011cf88c790STejun Heo vm->addr = (void *)va->va_start;
2012cf88c790STejun Heo vm->size = va->va_end - va->va_start;
2013cf88c790STejun Heo vm->caller = caller;
2014db1aecafSMinchan Kim va->vm = vm;
2015cf88c790STejun Heo va->flags |= VM_VM_AREA;
2016c69480adSJoonsoo Kim spin_unlock(&vmap_area_lock);
2017f5252e00SMitsuo Hayasaka }
2018cf88c790STejun Heo
201920fc02b4SZhang Yanfei static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2020f5252e00SMitsuo Hayasaka {
2021d4033afdSJoonsoo Kim /*
202220fc02b4SZhang Yanfei * Before removing VM_UNINITIALIZED,
2023d4033afdSJoonsoo Kim * we should make sure that vm has proper values.
2024d4033afdSJoonsoo Kim * Pair with smp_rmb() in show_numa_info().
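 * That is, the smp_wmb() below orders the stores that initialize the
 * vm fields before the store that clears VM_UNINITIALIZED, so a
 * reader that sees the flag cleared (after its smp_rmb()) also sees
 * valid vm->addr and vm->size.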
2025d4033afdSJoonsoo Kim */
2026d4033afdSJoonsoo Kim smp_wmb();
202720fc02b4SZhang Yanfei vm->flags &= ~VM_UNINITIALIZED;
2028cf88c790STejun Heo }
2029cf88c790STejun Heo
2030db64fe02SNick Piggin static struct vm_struct *__get_vm_area_node(unsigned long size,
20312dca6999SDavid Miller unsigned long align, unsigned long flags, unsigned long start,
20325e6cafc8SMarek Szyprowski unsigned long end, int node, gfp_t gfp_mask, const void *caller)
2033db64fe02SNick Piggin {
20340006526dSKautuk Consul struct vmap_area *va;
2035db64fe02SNick Piggin struct vm_struct *area;
20361da177e4SLinus Torvalds
203752fd24caSGiridhar Pemmasani BUG_ON(in_interrupt());
20381da177e4SLinus Torvalds size = PAGE_ALIGN(size);
203931be8309SOGAWA Hirofumi if (unlikely(!size))
204031be8309SOGAWA Hirofumi return NULL;
20411da177e4SLinus Torvalds
2042252e5c6eSzijun_hu if (flags & VM_IOREMAP)
2043252e5c6eSzijun_hu align = 1ul << clamp_t(int, get_count_order_long(size),
2044252e5c6eSzijun_hu PAGE_SHIFT, IOREMAP_MAX_ORDER);
2045252e5c6eSzijun_hu
2046cf88c790STejun Heo area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
20471da177e4SLinus Torvalds if (unlikely(!area))
20481da177e4SLinus Torvalds return NULL;
20491da177e4SLinus Torvalds
205071394fe5SAndrey Ryabinin if (!(flags & VM_NO_GUARD))
20511da177e4SLinus Torvalds size += PAGE_SIZE;
20521da177e4SLinus Torvalds
2053db64fe02SNick Piggin va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2054db64fe02SNick Piggin if (IS_ERR(va)) {
2055db64fe02SNick Piggin kfree(area);
2056db64fe02SNick Piggin return NULL;
20571da177e4SLinus Torvalds }
20581da177e4SLinus Torvalds
2059f5252e00SMitsuo Hayasaka setup_vmalloc_vm(area, va, flags, caller);
2060f5252e00SMitsuo Hayasaka
20611da177e4SLinus Torvalds return area;
20621da177e4SLinus Torvalds }
20631da177e4SLinus Torvalds
2064930fc45aSChristoph Lameter struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
2065930fc45aSChristoph Lameter unsigned long start, unsigned long end)
2066930fc45aSChristoph Lameter {
206700ef2d2fSDavid Rientjes return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
206800ef2d2fSDavid Rientjes GFP_KERNEL, __builtin_return_address(0));
2069930fc45aSChristoph Lameter }
20705992b6daSRusty Russell EXPORT_SYMBOL_GPL(__get_vm_area);
2071930fc45aSChristoph Lameter
2072c2968612SBenjamin Herrenschmidt struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2073c2968612SBenjamin Herrenschmidt unsigned long start, unsigned long end,
20745e6cafc8SMarek Szyprowski const void *caller)
2075c2968612SBenjamin Herrenschmidt {
207600ef2d2fSDavid Rientjes return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
207700ef2d2fSDavid Rientjes GFP_KERNEL, caller);
2078c2968612SBenjamin Herrenschmidt }
2079c2968612SBenjamin Herrenschmidt
20801da177e4SLinus Torvalds /**
2081183ff22bSSimon Arlott * get_vm_area - reserve a contiguous kernel virtual area
20821da177e4SLinus Torvalds * @size: size of the area
20831da177e4SLinus Torvalds * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
20841da177e4SLinus Torvalds *
20851da177e4SLinus Torvalds * Search for an area of @size in the kernel virtual mapping area,
20861da177e4SLinus Torvalds * and reserve it for our purposes. Returns the area descriptor
20871da177e4SLinus Torvalds * on success or %NULL on failure.
2088a862f68aSMike Rapoport *
2089a862f68aSMike Rapoport * Return: the area descriptor on success or %NULL on failure.
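 *
 * A usage sketch (illustrative):
 *
 *	struct vm_struct *area = get_vm_area(2 * PAGE_SIZE, VM_IOREMAP);
 *	if (!area)
 *		return NULL;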
20901da177e4SLinus Torvalds */
20911da177e4SLinus Torvalds struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
20921da177e4SLinus Torvalds {
20932dca6999SDavid Miller return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
209400ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL,
209500ef2d2fSDavid Rientjes __builtin_return_address(0));
209623016969SChristoph Lameter }
209723016969SChristoph Lameter
209823016969SChristoph Lameter struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
20995e6cafc8SMarek Szyprowski const void *caller)
210023016969SChristoph Lameter {
21012dca6999SDavid Miller return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
210200ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL, caller);
21031da177e4SLinus Torvalds }
21041da177e4SLinus Torvalds
2105e9da6e99SMarek Szyprowski /**
2106e9da6e99SMarek Szyprowski * find_vm_area - find a contiguous kernel virtual area
2107e9da6e99SMarek Szyprowski * @addr: base address
2108e9da6e99SMarek Szyprowski *
2109e9da6e99SMarek Szyprowski * Search for the kernel VM area starting at @addr, and return it.
2110e9da6e99SMarek Szyprowski * It is up to the caller to do all required locking to keep the returned
2111e9da6e99SMarek Szyprowski * pointer valid.
2112a862f68aSMike Rapoport *
2113a862f68aSMike Rapoport * Return: pointer to the found area or %NULL on failure
2114e9da6e99SMarek Szyprowski */
2115e9da6e99SMarek Szyprowski struct vm_struct *find_vm_area(const void *addr)
211683342314SNick Piggin {
2117db64fe02SNick Piggin struct vmap_area *va;
211883342314SNick Piggin
2119db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr);
2120db64fe02SNick Piggin if (va && va->flags & VM_VM_AREA)
2121db1aecafSMinchan Kim return va->vm;
212283342314SNick Piggin
21237856dfebSAndi Kleen return NULL;
21247856dfebSAndi Kleen }
21257856dfebSAndi Kleen
21261da177e4SLinus Torvalds /**
2127183ff22bSSimon Arlott * remove_vm_area - find and remove a contiguous kernel virtual area
21281da177e4SLinus Torvalds * @addr: base address
21291da177e4SLinus Torvalds *
21301da177e4SLinus Torvalds * Search for the kernel VM area starting at @addr, and remove it.
21311da177e4SLinus Torvalds * This function returns the found VM area, but using it is NOT safe
21327856dfebSAndi Kleen * on SMP machines, except for its size or flags.
2133a862f68aSMike Rapoport *
2134a862f68aSMike Rapoport * Return: pointer to the found area or %NULL on failure
21351da177e4SLinus Torvalds */
2136b3bdda02SChristoph Lameter struct vm_struct *remove_vm_area(const void *addr)
21371da177e4SLinus Torvalds {
2138db64fe02SNick Piggin struct vmap_area *va;
2139db64fe02SNick Piggin
21405803ed29SChristoph Hellwig might_sleep();
21415803ed29SChristoph Hellwig
2142db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr);
2143db64fe02SNick Piggin if (va && va->flags & VM_VM_AREA) {
2144db1aecafSMinchan Kim struct vm_struct *vm = va->vm;
2145f5252e00SMitsuo Hayasaka
2146c69480adSJoonsoo Kim spin_lock(&vmap_area_lock);
2147c69480adSJoonsoo Kim va->vm = NULL;
2148c69480adSJoonsoo Kim va->flags &= ~VM_VM_AREA;
214978c72746SYisheng Xie va->flags |= VM_LAZY_FREE;
2150c69480adSJoonsoo Kim spin_unlock(&vmap_area_lock);
2151c69480adSJoonsoo Kim
2152a5af5aa8SAndrey Ryabinin kasan_free_shadow(vm);
2153dd32c279SKAMEZAWA Hiroyuki free_unmap_vmap_area(va);
2154dd32c279SKAMEZAWA Hiroyuki
2155db64fe02SNick Piggin return vm;
2156db64fe02SNick Piggin }
2157db64fe02SNick Piggin return NULL;
21581da177e4SLinus Torvalds }
21591da177e4SLinus Torvalds
2160868b104dSRick Edgecombe static inline void set_area_direct_map(const struct vm_struct *area,
2161868b104dSRick Edgecombe int (*set_direct_map)(struct page *page))
2162868b104dSRick Edgecombe {
2163868b104dSRick Edgecombe int i;
2164868b104dSRick Edgecombe
2165868b104dSRick Edgecombe for (i = 0; i < area->nr_pages; i++)
2166868b104dSRick Edgecombe if (page_address(area->pages[i]))
2167868b104dSRick Edgecombe set_direct_map(area->pages[i]);
2168868b104dSRick Edgecombe }
2169868b104dSRick Edgecombe
2170868b104dSRick Edgecombe /* Handle removing and resetting vm mappings related to the vm_struct. */
2171868b104dSRick Edgecombe static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2172868b104dSRick Edgecombe {
2173868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0;
2174868b104dSRick Edgecombe int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
217531e67340SRick Edgecombe int flush_dmap = 0;
2176868b104dSRick Edgecombe int i;
2177868b104dSRick Edgecombe
2178868b104dSRick Edgecombe remove_vm_area(area->addr);
2179868b104dSRick Edgecombe
2180868b104dSRick Edgecombe /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
2181868b104dSRick Edgecombe if (!flush_reset)
2182868b104dSRick Edgecombe return;
2183868b104dSRick Edgecombe
2184868b104dSRick Edgecombe /*
2185868b104dSRick Edgecombe * If not deallocating pages, just do the flush of the VM area and
2186868b104dSRick Edgecombe * return.
2187868b104dSRick Edgecombe */
2188868b104dSRick Edgecombe if (!deallocate_pages) {
2189868b104dSRick Edgecombe vm_unmap_aliases();
2190868b104dSRick Edgecombe return;
2191868b104dSRick Edgecombe }
2192868b104dSRick Edgecombe
2193868b104dSRick Edgecombe /*
2194868b104dSRick Edgecombe * If execution gets here, flush the vm mapping and reset the direct
2195868b104dSRick Edgecombe * map. Find the start and end range of the direct mappings to make sure
2196868b104dSRick Edgecombe * the vm_unmap_aliases() flush includes the direct map.
2197868b104dSRick Edgecombe */ 2198868b104dSRick Edgecombe for (i = 0; i < area->nr_pages; i++) { 21998e41f872SRick Edgecombe unsigned long addr = (unsigned long)page_address(area->pages[i]); 22008e41f872SRick Edgecombe if (addr) { 2201868b104dSRick Edgecombe start = min(addr, start); 22028e41f872SRick Edgecombe end = max(addr + PAGE_SIZE, end); 220331e67340SRick Edgecombe flush_dmap = 1; 2204868b104dSRick Edgecombe } 2205868b104dSRick Edgecombe } 2206868b104dSRick Edgecombe 2207868b104dSRick Edgecombe /* 2208868b104dSRick Edgecombe * Set direct map to something invalid so that it won't be cached if 2209868b104dSRick Edgecombe * there are any accesses after the TLB flush, then flush the TLB and 2210868b104dSRick Edgecombe * reset the direct map permissions to the default. 2211868b104dSRick Edgecombe */ 2212868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_invalid_noflush); 221331e67340SRick Edgecombe _vm_unmap_aliases(start, end, flush_dmap); 2214868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_default_noflush); 2215868b104dSRick Edgecombe } 2216868b104dSRick Edgecombe 2217b3bdda02SChristoph Lameter static void __vunmap(const void *addr, int deallocate_pages) 22181da177e4SLinus Torvalds { 22191da177e4SLinus Torvalds struct vm_struct *area; 22201da177e4SLinus Torvalds 22211da177e4SLinus Torvalds if (!addr) 22221da177e4SLinus Torvalds return; 22231da177e4SLinus Torvalds 2224e69e9d4aSHATAYAMA Daisuke if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", 2225ab15d9b4SDan Carpenter addr)) 22261da177e4SLinus Torvalds return; 22271da177e4SLinus Torvalds 22286ade2032SLiviu Dudau area = find_vm_area(addr); 22291da177e4SLinus Torvalds if (unlikely(!area)) { 22304c8573e2SArjan van de Ven WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 22311da177e4SLinus Torvalds addr); 22321da177e4SLinus Torvalds return; 22331da177e4SLinus Torvalds } 22341da177e4SLinus Torvalds 223505e3ff95SChintan Pandya debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); 223605e3ff95SChintan Pandya debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); 22379a11b49aSIngo Molnar 2238868b104dSRick Edgecombe vm_remove_mappings(area, deallocate_pages); 2239868b104dSRick Edgecombe 22401da177e4SLinus Torvalds if (deallocate_pages) { 22411da177e4SLinus Torvalds int i; 22421da177e4SLinus Torvalds 22431da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 2244bf53d6f8SChristoph Lameter struct page *page = area->pages[i]; 2245bf53d6f8SChristoph Lameter 2246bf53d6f8SChristoph Lameter BUG_ON(!page); 22474949148aSVladimir Davydov __free_pages(page, 0); 22481da177e4SLinus Torvalds } 22491da177e4SLinus Torvalds 2250244d63eeSDavid Rientjes kvfree(area->pages); 22511da177e4SLinus Torvalds } 22521da177e4SLinus Torvalds 22531da177e4SLinus Torvalds kfree(area); 22541da177e4SLinus Torvalds return; 22551da177e4SLinus Torvalds } 22561da177e4SLinus Torvalds 2257bf22e37aSAndrey Ryabinin static inline void __vfree_deferred(const void *addr) 2258bf22e37aSAndrey Ryabinin { 2259bf22e37aSAndrey Ryabinin /* 2260bf22e37aSAndrey Ryabinin * Use raw_cpu_ptr() because this can be called from preemptible 2261bf22e37aSAndrey Ryabinin * context. Preemption is absolutely fine here, because the llist_add() 2262bf22e37aSAndrey Ryabinin * implementation is lockless, so it works even if we are adding to 2263bf22e37aSAndrey Ryabinin * another cpu's list. schedule_work() should be fine with this too.
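 * (llist_add() returns true only when the list was empty beforehand, so
 * the work item is scheduled once per batch of deferred frees rather
 * than once per call.)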
2264bf22e37aSAndrey Ryabinin */ 2265bf22e37aSAndrey Ryabinin struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred); 2266bf22e37aSAndrey Ryabinin 2267bf22e37aSAndrey Ryabinin if (llist_add((struct llist_node *)addr, &p->list)) 2268bf22e37aSAndrey Ryabinin schedule_work(&p->wq); 2269bf22e37aSAndrey Ryabinin } 2270bf22e37aSAndrey Ryabinin 2271bf22e37aSAndrey Ryabinin /** 2272bf22e37aSAndrey Ryabinin * vfree_atomic - release memory allocated by vmalloc() 2273bf22e37aSAndrey Ryabinin * @addr: memory base address 2274bf22e37aSAndrey Ryabinin * 2275bf22e37aSAndrey Ryabinin * This one is just like vfree() but can be called in any atomic context 2276bf22e37aSAndrey Ryabinin * except NMIs. 2277bf22e37aSAndrey Ryabinin */ 2278bf22e37aSAndrey Ryabinin void vfree_atomic(const void *addr) 2279bf22e37aSAndrey Ryabinin { 2280bf22e37aSAndrey Ryabinin BUG_ON(in_nmi()); 2281bf22e37aSAndrey Ryabinin 2282bf22e37aSAndrey Ryabinin kmemleak_free(addr); 2283bf22e37aSAndrey Ryabinin 2284bf22e37aSAndrey Ryabinin if (!addr) 2285bf22e37aSAndrey Ryabinin return; 2286bf22e37aSAndrey Ryabinin __vfree_deferred(addr); 2287bf22e37aSAndrey Ryabinin } 2288bf22e37aSAndrey Ryabinin 2289c67dc624SRoman Penyaev static void __vfree(const void *addr) 2290c67dc624SRoman Penyaev { 2291c67dc624SRoman Penyaev if (unlikely(in_interrupt())) 2292c67dc624SRoman Penyaev __vfree_deferred(addr); 2293c67dc624SRoman Penyaev else 2294c67dc624SRoman Penyaev __vunmap(addr, 1); 2295c67dc624SRoman Penyaev } 2296c67dc624SRoman Penyaev 22971da177e4SLinus Torvalds /** 22981da177e4SLinus Torvalds * vfree - release memory allocated by vmalloc() 22991da177e4SLinus Torvalds * @addr: memory base address 23001da177e4SLinus Torvalds * 2301183ff22bSSimon Arlott * Free the virtually contiguous memory area starting at @addr, as 230280e93effSPekka Enberg * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is 230380e93effSPekka Enberg * NULL, no operation is performed. 23041da177e4SLinus Torvalds * 230532fcfd40SAl Viro * Must not be called in NMI context (strictly speaking, only if we don't 230632fcfd40SAl Viro * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling 230732fcfd40SAl Viro * conventions for vfree() arch-dependent would be a really bad idea) 230832fcfd40SAl Viro * 23093ca4ea3aSAndrey Ryabinin * May sleep if called *not* from interrupt context. 23103ca4ea3aSAndrey Ryabinin * 23110e056eb5Smchehab@s-opensource.com * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node) 23121da177e4SLinus Torvalds */ 2313b3bdda02SChristoph Lameter void vfree(const void *addr) 23141da177e4SLinus Torvalds { 231532fcfd40SAl Viro BUG_ON(in_nmi()); 231689219d37SCatalin Marinas 231789219d37SCatalin Marinas kmemleak_free(addr); 231889219d37SCatalin Marinas 2319a8dda165SAndrey Ryabinin might_sleep_if(!in_interrupt()); 2320a8dda165SAndrey Ryabinin 232132fcfd40SAl Viro if (!addr) 232232fcfd40SAl Viro return; 2323c67dc624SRoman Penyaev 2324c67dc624SRoman Penyaev __vfree(addr); 23251da177e4SLinus Torvalds } 23261da177e4SLinus Torvalds EXPORT_SYMBOL(vfree); 23271da177e4SLinus Torvalds 23281da177e4SLinus Torvalds /** 23291da177e4SLinus Torvalds * vunmap - release virtual mapping obtained by vmap() 23301da177e4SLinus Torvalds * @addr: memory base address 23311da177e4SLinus Torvalds * 23321da177e4SLinus Torvalds * Free the virtually contiguous memory area starting at @addr, 23331da177e4SLinus Torvalds * which was created from the page array passed to vmap().
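 *
 * Example (illustrative sketch only; assumes the caller already owns the
 * two allocated pages and does its own error handling):
 *
 *	struct page *pages[2] = { page0, page1 };
 *	void *va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
 *
 *	if (va) {
 *		... access both pages contiguously through va ...
 *		vunmap(va);
 *	}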
23341da177e4SLinus Torvalds * 233580e93effSPekka Enberg * Must not be called in interrupt context. 23361da177e4SLinus Torvalds */ 2337b3bdda02SChristoph Lameter void vunmap(const void *addr) 23381da177e4SLinus Torvalds { 23391da177e4SLinus Torvalds BUG_ON(in_interrupt()); 234034754b69SPeter Zijlstra might_sleep(); 234132fcfd40SAl Viro if (addr) 23421da177e4SLinus Torvalds __vunmap(addr, 0); 23431da177e4SLinus Torvalds } 23441da177e4SLinus Torvalds EXPORT_SYMBOL(vunmap); 23451da177e4SLinus Torvalds 23461da177e4SLinus Torvalds /** 23471da177e4SLinus Torvalds * vmap - map an array of pages into virtually contiguous space 23481da177e4SLinus Torvalds * @pages: array of page pointers 23491da177e4SLinus Torvalds * @count: number of pages to map 23501da177e4SLinus Torvalds * @flags: vm_area->flags 23511da177e4SLinus Torvalds * @prot: page protection for the mapping 23521da177e4SLinus Torvalds * 23531da177e4SLinus Torvalds * Maps @count pages from @pages into contiguous kernel virtual 23541da177e4SLinus Torvalds * space. 2355a862f68aSMike Rapoport * 2356a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure 23571da177e4SLinus Torvalds */ 23581da177e4SLinus Torvalds void *vmap(struct page **pages, unsigned int count, 23591da177e4SLinus Torvalds unsigned long flags, pgprot_t prot) 23601da177e4SLinus Torvalds { 23611da177e4SLinus Torvalds struct vm_struct *area; 236265ee03c4SGuillermo Julián Moreno unsigned long size; /* In bytes */ 23631da177e4SLinus Torvalds 236434754b69SPeter Zijlstra might_sleep(); 236534754b69SPeter Zijlstra 2366ca79b0c2SArun KS if (count > totalram_pages()) 23671da177e4SLinus Torvalds return NULL; 23681da177e4SLinus Torvalds 236965ee03c4SGuillermo Julián Moreno size = (unsigned long)count << PAGE_SHIFT; 237065ee03c4SGuillermo Julián Moreno area = get_vm_area_caller(size, flags, __builtin_return_address(0)); 23711da177e4SLinus Torvalds if (!area) 23721da177e4SLinus Torvalds return NULL; 237323016969SChristoph Lameter 2374f6f8ed47SWANG Chao if (map_vm_area(area, prot, pages)) { 23751da177e4SLinus Torvalds vunmap(area->addr); 23761da177e4SLinus Torvalds return NULL; 23771da177e4SLinus Torvalds } 23781da177e4SLinus Torvalds 23791da177e4SLinus Torvalds return area->addr; 23801da177e4SLinus Torvalds } 23811da177e4SLinus Torvalds EXPORT_SYMBOL(vmap); 23821da177e4SLinus Torvalds 23838594a21cSMichal Hocko static void *__vmalloc_node(unsigned long size, unsigned long align, 23848594a21cSMichal Hocko gfp_t gfp_mask, pgprot_t prot, 23858594a21cSMichal Hocko int node, const void *caller); 2386e31d9eb5SAdrian Bunk static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 23873722e13cSWanpeng Li pgprot_t prot, int node) 23881da177e4SLinus Torvalds { 23891da177e4SLinus Torvalds struct page **pages; 23901da177e4SLinus Torvalds unsigned int nr_pages, array_size, i; 2391930f036bSDavid Rientjes const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 2392704b862fSLaura Abbott const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN; 2393704b862fSLaura Abbott const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ? 2394704b862fSLaura Abbott 0 : 2395704b862fSLaura Abbott __GFP_HIGHMEM; 23961da177e4SLinus Torvalds 2397762216abSWanpeng Li nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; 23981da177e4SLinus Torvalds array_size = (nr_pages * sizeof(struct page *)); 23991da177e4SLinus Torvalds 24001da177e4SLinus Torvalds area->nr_pages = nr_pages; 24011da177e4SLinus Torvalds /* Please note that the recursion is strictly bounded. 
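 * (Bounded because the nested __vmalloc_node() call only allocates the
 * page-pointer array, which is smaller than the allocation it serves by
 * a factor of roughly PAGE_SIZE / sizeof(struct page *), so the nesting
 * bottoms out after a few levels.)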
*/ 24028757d5faSJan Kiszka if (array_size > PAGE_SIZE) { 2403704b862fSLaura Abbott pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask, 24043722e13cSWanpeng Li PAGE_KERNEL, node, area->caller); 2405286e1ea3SAndrew Morton } else { 2406976d6dfbSJan Beulich pages = kmalloc_node(array_size, nested_gfp, node); 2407286e1ea3SAndrew Morton } 24081da177e4SLinus Torvalds area->pages = pages; 24091da177e4SLinus Torvalds if (!area->pages) { 24101da177e4SLinus Torvalds remove_vm_area(area->addr); 24111da177e4SLinus Torvalds kfree(area); 24121da177e4SLinus Torvalds return NULL; 24131da177e4SLinus Torvalds } 24141da177e4SLinus Torvalds 24151da177e4SLinus Torvalds for (i = 0; i < area->nr_pages; i++) { 2416bf53d6f8SChristoph Lameter struct page *page; 2417bf53d6f8SChristoph Lameter 24184b90951cSJianguo Wu if (node == NUMA_NO_NODE) 2419704b862fSLaura Abbott page = alloc_page(alloc_mask|highmem_mask); 2420930fc45aSChristoph Lameter else 2421704b862fSLaura Abbott page = alloc_pages_node(node, alloc_mask|highmem_mask, 0); 2422bf53d6f8SChristoph Lameter 2423bf53d6f8SChristoph Lameter if (unlikely(!page)) { 24241da177e4SLinus Torvalds /* Successfully allocated i pages, free them in __vunmap() */ 24251da177e4SLinus Torvalds area->nr_pages = i; 24261da177e4SLinus Torvalds goto fail; 24271da177e4SLinus Torvalds } 2428bf53d6f8SChristoph Lameter area->pages[i] = page; 2429704b862fSLaura Abbott if (gfpflags_allow_blocking(gfp_mask|highmem_mask)) 2430660654f9SEric Dumazet cond_resched(); 24311da177e4SLinus Torvalds } 24321da177e4SLinus Torvalds 2433f6f8ed47SWANG Chao if (map_vm_area(area, prot, pages)) 24341da177e4SLinus Torvalds goto fail; 24351da177e4SLinus Torvalds return area->addr; 24361da177e4SLinus Torvalds 24371da177e4SLinus Torvalds fail: 2438a8e99259SMichal Hocko warn_alloc(gfp_mask, NULL, 24397877cdccSMichal Hocko "vmalloc: allocation failure, allocated %ld of %ld bytes", 244022943ab1SDave Hansen (area->nr_pages*PAGE_SIZE), area->size); 2441c67dc624SRoman Penyaev __vfree(area->addr); 24421da177e4SLinus Torvalds return NULL; 24431da177e4SLinus Torvalds } 24441da177e4SLinus Torvalds 2445d0a21265SDavid Rientjes /** 2446d0a21265SDavid Rientjes * __vmalloc_node_range - allocate virtually contiguous memory 2447d0a21265SDavid Rientjes * @size: allocation size 2448d0a21265SDavid Rientjes * @align: desired alignment 2449d0a21265SDavid Rientjes * @start: vm area range start 2450d0a21265SDavid Rientjes * @end: vm area range end 2451d0a21265SDavid Rientjes * @gfp_mask: flags for the page level allocator 2452d0a21265SDavid Rientjes * @prot: protection mask for the allocated pages 2453cb9e3c29SAndrey Ryabinin * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) 245400ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 2455d0a21265SDavid Rientjes * @caller: caller's return address 2456d0a21265SDavid Rientjes * 2457d0a21265SDavid Rientjes * Allocate enough pages to cover @size from the page level 2458d0a21265SDavid Rientjes * allocator with @gfp_mask flags. Map them into contiguous 2459d0a21265SDavid Rientjes * kernel virtual space, using a pagetable protection of @prot. 
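 *
 * Example (illustrative sketch only; the arguments shown are the usual
 * vmalloc defaults, mirroring what __vmalloc_node() below passes in):
 *
 *	void *p = __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
 *			GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
 *			__builtin_return_address(0));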
2460a862f68aSMike Rapoport * 2461a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure 2462d0a21265SDavid Rientjes */ 2463d0a21265SDavid Rientjes void *__vmalloc_node_range(unsigned long size, unsigned long align, 2464d0a21265SDavid Rientjes unsigned long start, unsigned long end, gfp_t gfp_mask, 2465cb9e3c29SAndrey Ryabinin pgprot_t prot, unsigned long vm_flags, int node, 2466cb9e3c29SAndrey Ryabinin const void *caller) 2467930fc45aSChristoph Lameter { 2468d0a21265SDavid Rientjes struct vm_struct *area; 2469d0a21265SDavid Rientjes void *addr; 2470d0a21265SDavid Rientjes unsigned long real_size = size; 2471d0a21265SDavid Rientjes 2472d0a21265SDavid Rientjes size = PAGE_ALIGN(size); 2473ca79b0c2SArun KS if (!size || (size >> PAGE_SHIFT) > totalram_pages()) 2474de7d2b56SJoe Perches goto fail; 2475d0a21265SDavid Rientjes 2476cb9e3c29SAndrey Ryabinin area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | 2477cb9e3c29SAndrey Ryabinin vm_flags, start, end, node, gfp_mask, caller); 2478d0a21265SDavid Rientjes if (!area) 2479de7d2b56SJoe Perches goto fail; 2480d0a21265SDavid Rientjes 24813722e13cSWanpeng Li addr = __vmalloc_area_node(area, gfp_mask, prot, node); 24821368edf0SMel Gorman if (!addr) 2483b82225f3SWanpeng Li return NULL; 248489219d37SCatalin Marinas 248589219d37SCatalin Marinas /* 248620fc02b4SZhang Yanfei * In this function, newly allocated vm_struct has VM_UNINITIALIZED 248720fc02b4SZhang Yanfei * flag. It means that vm_struct is not fully initialized. 24884341fa45SJoonsoo Kim * Now, it is fully initialized, so remove this flag here. 2489f5252e00SMitsuo Hayasaka */ 249020fc02b4SZhang Yanfei clear_vm_uninitialized_flag(area); 2491f5252e00SMitsuo Hayasaka 249294f4a161SCatalin Marinas kmemleak_vmalloc(area, size, gfp_mask); 249389219d37SCatalin Marinas 249489219d37SCatalin Marinas return addr; 2495de7d2b56SJoe Perches 2496de7d2b56SJoe Perches fail: 2497a8e99259SMichal Hocko warn_alloc(gfp_mask, NULL, 24987877cdccSMichal Hocko "vmalloc: allocation failure: %lu bytes", real_size); 2499de7d2b56SJoe Perches return NULL; 2500930fc45aSChristoph Lameter } 2501930fc45aSChristoph Lameter 2502153178edSUladzislau Rezki (Sony) /* 2503153178edSUladzislau Rezki (Sony) * This is only for performance analysis of vmalloc and for stress purposes. 2504153178edSUladzislau Rezki (Sony) * It is required by the vmalloc test module; do not use it for anything 2505153178edSUladzislau Rezki (Sony) * else. 2506153178edSUladzislau Rezki (Sony) */ 2507153178edSUladzislau Rezki (Sony) #ifdef CONFIG_TEST_VMALLOC_MODULE 2508153178edSUladzislau Rezki (Sony) EXPORT_SYMBOL_GPL(__vmalloc_node_range); 2509153178edSUladzislau Rezki (Sony) #endif 2510153178edSUladzislau Rezki (Sony) 25111da177e4SLinus Torvalds /** 2512930fc45aSChristoph Lameter * __vmalloc_node - allocate virtually contiguous memory 25131da177e4SLinus Torvalds * @size: allocation size 25142dca6999SDavid Miller * @align: desired alignment 25151da177e4SLinus Torvalds * @gfp_mask: flags for the page level allocator 25161da177e4SLinus Torvalds * @prot: protection mask for the allocated pages 251700ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 2518c85d194bSRandy Dunlap * @caller: caller's return address 25191da177e4SLinus Torvalds * 25201da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 25211da177e4SLinus Torvalds * allocator with @gfp_mask flags. Map them into contiguous 25221da177e4SLinus Torvalds * kernel virtual space, using a pagetable protection of @prot.
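 *
 * Example (illustrative sketch only): callers normally reach this helper
 * through the exported __vmalloc() wrapper below, e.g.:
 *
 *	void *p = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);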
2523a7c3e901SMichal Hocko * 2524dcda9b04SMichal Hocko * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL 2525a7c3e901SMichal Hocko * and __GFP_NOFAIL are not supported 2526a7c3e901SMichal Hocko * 2527a7c3e901SMichal Hocko * Any use of gfp flags outside of GFP_KERNEL should be discussed 2528a7c3e901SMichal Hocko * with the mm people. 2529a862f68aSMike Rapoport * 2530a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 25311da177e4SLinus Torvalds */ 25328594a21cSMichal Hocko static void *__vmalloc_node(unsigned long size, unsigned long align, 25332dca6999SDavid Miller gfp_t gfp_mask, pgprot_t prot, 25345e6cafc8SMarek Szyprowski int node, const void *caller) 25351da177e4SLinus Torvalds { 2536d0a21265SDavid Rientjes return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 2537cb9e3c29SAndrey Ryabinin gfp_mask, prot, 0, node, caller); 25381da177e4SLinus Torvalds } 25391da177e4SLinus Torvalds 2540930fc45aSChristoph Lameter void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 2541930fc45aSChristoph Lameter { 254200ef2d2fSDavid Rientjes return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE, 254323016969SChristoph Lameter __builtin_return_address(0)); 2544930fc45aSChristoph Lameter } 25451da177e4SLinus Torvalds EXPORT_SYMBOL(__vmalloc); 25461da177e4SLinus Torvalds 25478594a21cSMichal Hocko static inline void *__vmalloc_node_flags(unsigned long size, 25488594a21cSMichal Hocko int node, gfp_t flags) 25498594a21cSMichal Hocko { 25508594a21cSMichal Hocko return __vmalloc_node(size, 1, flags, PAGE_KERNEL, 25518594a21cSMichal Hocko node, __builtin_return_address(0)); 25528594a21cSMichal Hocko } 25538594a21cSMichal Hocko 25548594a21cSMichal Hocko 25558594a21cSMichal Hocko void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags, 25568594a21cSMichal Hocko void *caller) 25578594a21cSMichal Hocko { 25588594a21cSMichal Hocko return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller); 25598594a21cSMichal Hocko } 25608594a21cSMichal Hocko 25611da177e4SLinus Torvalds /** 25621da177e4SLinus Torvalds * vmalloc - allocate virtually contiguous memory 25631da177e4SLinus Torvalds * @size: allocation size 256492eac168SMike Rapoport * 25651da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 25661da177e4SLinus Torvalds * allocator and map them into contiguous kernel virtual space. 25671da177e4SLinus Torvalds * 2568c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 25691da177e4SLinus Torvalds * use __vmalloc() instead. 2570a862f68aSMike Rapoport * 2571a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 25721da177e4SLinus Torvalds */ 25731da177e4SLinus Torvalds void *vmalloc(unsigned long size) 25741da177e4SLinus Torvalds { 257500ef2d2fSDavid Rientjes return __vmalloc_node_flags(size, NUMA_NO_NODE, 257619809c2dSMichal Hocko GFP_KERNEL); 25771da177e4SLinus Torvalds } 25781da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc); 25791da177e4SLinus Torvalds 2580930fc45aSChristoph Lameter /** 2581e1ca7788SDave Young * vzalloc - allocate virtually contiguous memory with zero fill 2582e1ca7788SDave Young * @size: allocation size 258392eac168SMike Rapoport * 2584e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 2585e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 2586e1ca7788SDave Young * The memory allocated is set to zero.
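 *
 * Example (illustrative sketch only; "nr_entries" and "struct entry" are
 * made up for the illustration):
 *
 *	struct entry *table = vzalloc(nr_entries * sizeof(*table));
 *
 *	if (!table)
 *		return -ENOMEM;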
2587e1ca7788SDave Young * 2588e1ca7788SDave Young * For tight control over page level allocator and protection flags 2589e1ca7788SDave Young * use __vmalloc() instead. 2590a862f68aSMike Rapoport * 2591a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 2592e1ca7788SDave Young */ 2593e1ca7788SDave Young void *vzalloc(unsigned long size) 2594e1ca7788SDave Young { 259500ef2d2fSDavid Rientjes return __vmalloc_node_flags(size, NUMA_NO_NODE, 259619809c2dSMichal Hocko GFP_KERNEL | __GFP_ZERO); 2597e1ca7788SDave Young } 2598e1ca7788SDave Young EXPORT_SYMBOL(vzalloc); 2599e1ca7788SDave Young 2600e1ca7788SDave Young /** 2601ead04089SRolf Eike Beer * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 260283342314SNick Piggin * @size: allocation size 2603ead04089SRolf Eike Beer * 2604ead04089SRolf Eike Beer * The resulting memory area is zeroed so it can be mapped to userspace 2605ead04089SRolf Eike Beer * without leaking data. 2606a862f68aSMike Rapoport * 2607a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 260883342314SNick Piggin */ 260983342314SNick Piggin void *vmalloc_user(unsigned long size) 261083342314SNick Piggin { 2611bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 2612bc84c535SRoman Penyaev GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, 2613bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE, 261400ef2d2fSDavid Rientjes __builtin_return_address(0)); 261583342314SNick Piggin } 261683342314SNick Piggin EXPORT_SYMBOL(vmalloc_user); 261783342314SNick Piggin 261883342314SNick Piggin /** 2619930fc45aSChristoph Lameter * vmalloc_node - allocate memory on a specific node 2620930fc45aSChristoph Lameter * @size: allocation size 2621d44e0780SRandy Dunlap * @node: numa node 2622930fc45aSChristoph Lameter * 2623930fc45aSChristoph Lameter * Allocate enough pages to cover @size from the page level 2624930fc45aSChristoph Lameter * allocator and map them into contiguous kernel virtual space. 2625930fc45aSChristoph Lameter * 2626c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 2627930fc45aSChristoph Lameter * use __vmalloc() instead. 2628a862f68aSMike Rapoport * 2629a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 2630930fc45aSChristoph Lameter */ 2631930fc45aSChristoph Lameter void *vmalloc_node(unsigned long size, int node) 2632930fc45aSChristoph Lameter { 263319809c2dSMichal Hocko return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL, 263423016969SChristoph Lameter node, __builtin_return_address(0)); 2635930fc45aSChristoph Lameter } 2636930fc45aSChristoph Lameter EXPORT_SYMBOL(vmalloc_node); 2637930fc45aSChristoph Lameter 2638e1ca7788SDave Young /** 2639e1ca7788SDave Young * vzalloc_node - allocate memory on a specific node with zero fill 2640e1ca7788SDave Young * @size: allocation size 2641e1ca7788SDave Young * @node: numa node 2642e1ca7788SDave Young * 2643e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 2644e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 2645e1ca7788SDave Young * The memory allocated is set to zero. 2646e1ca7788SDave Young * 2647e1ca7788SDave Young * For tight control over page level allocator and protection flags 2648e1ca7788SDave Young * use __vmalloc_node() instead. 
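 *
 * Example (illustrative sketch only; "stats", "nr_cpus" and "nid" are
 * made up for a per-node allocation):
 *
 *	stats = vzalloc_node(nr_cpus * sizeof(*stats), nid);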
2649a862f68aSMike Rapoport * 2650a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 2651e1ca7788SDave Young */ 2652e1ca7788SDave Young void *vzalloc_node(unsigned long size, int node) 2653e1ca7788SDave Young { 2654e1ca7788SDave Young return __vmalloc_node_flags(size, node, 265519809c2dSMichal Hocko GFP_KERNEL | __GFP_ZERO); 2656e1ca7788SDave Young } 2657e1ca7788SDave Young EXPORT_SYMBOL(vzalloc_node); 2658e1ca7788SDave Young 26591da177e4SLinus Torvalds /** 26601da177e4SLinus Torvalds * vmalloc_exec - allocate virtually contiguous, executable memory 26611da177e4SLinus Torvalds * @size: allocation size 26621da177e4SLinus Torvalds * 26631da177e4SLinus Torvalds * Kernel-internal function to allocate enough pages to cover @size 26641da177e4SLinus Torvalds * from the page level allocator and map them into contiguous and 26651da177e4SLinus Torvalds * executable kernel virtual space. 26661da177e4SLinus Torvalds * 2667c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 26681da177e4SLinus Torvalds * use __vmalloc() instead. 2669a862f68aSMike Rapoport * 2670a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 26711da177e4SLinus Torvalds */ 26721da177e4SLinus Torvalds void *vmalloc_exec(unsigned long size) 26731da177e4SLinus Torvalds { 2674868b104dSRick Edgecombe return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, 2675868b104dSRick Edgecombe GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, 267600ef2d2fSDavid Rientjes NUMA_NO_NODE, __builtin_return_address(0)); 26771da177e4SLinus Torvalds } 26781da177e4SLinus Torvalds 26790d08e0d3SAndi Kleen #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 2680698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 26810d08e0d3SAndi Kleen #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 2682698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) 26830d08e0d3SAndi Kleen #else 2684698d0831SMichal Hocko /* 2685698d0831SMichal Hocko * 64b systems should always have either DMA or DMA32 zones. For others 2686698d0831SMichal Hocko * GFP_DMA32 should do the right thing and use the normal zone. 2687698d0831SMichal Hocko */ 2688698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 26890d08e0d3SAndi Kleen #endif 26900d08e0d3SAndi Kleen 26911da177e4SLinus Torvalds /** 26921da177e4SLinus Torvalds * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 26931da177e4SLinus Torvalds * @size: allocation size 26941da177e4SLinus Torvalds * 26951da177e4SLinus Torvalds * Allocate enough 32bit PA addressable pages to cover @size from the 26961da177e4SLinus Torvalds * page level allocator and map them into contiguous kernel virtual space.
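 *
 * Example (illustrative sketch only; "buf_len" is a made-up length for a
 * device that can only address 32bit physical memory):
 *
 *	void *buf = vmalloc_32(buf_len);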
2697a862f68aSMike Rapoport * 2698a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 26991da177e4SLinus Torvalds */ 27001da177e4SLinus Torvalds void *vmalloc_32(unsigned long size) 27011da177e4SLinus Torvalds { 27022dca6999SDavid Miller return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL, 270300ef2d2fSDavid Rientjes NUMA_NO_NODE, __builtin_return_address(0)); 27041da177e4SLinus Torvalds } 27051da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_32); 27061da177e4SLinus Torvalds 270783342314SNick Piggin /** 2708ead04089SRolf Eike Beer * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 270983342314SNick Piggin * @size: allocation size 2710ead04089SRolf Eike Beer * 2711ead04089SRolf Eike Beer * The resulting memory area is 32bit addressable and zeroed so it can be 2712ead04089SRolf Eike Beer * mapped to userspace without leaking data. 2713a862f68aSMike Rapoport * 2714a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 271583342314SNick Piggin */ 271683342314SNick Piggin void *vmalloc_32_user(unsigned long size) 271783342314SNick Piggin { 2718bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 2719bc84c535SRoman Penyaev GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 2720bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE, 27215a82ac71SRoman Penyaev __builtin_return_address(0)); 272283342314SNick Piggin } 272383342314SNick Piggin EXPORT_SYMBOL(vmalloc_32_user); 272483342314SNick Piggin 2725d0107eb0SKAMEZAWA Hiroyuki /* 2726d0107eb0SKAMEZAWA Hiroyuki * Small helper routine: copy contents from addr to buf. 2727d0107eb0SKAMEZAWA Hiroyuki * If the page is not present, fill with zeroes. 2728d0107eb0SKAMEZAWA Hiroyuki */ 2729d0107eb0SKAMEZAWA Hiroyuki 2730d0107eb0SKAMEZAWA Hiroyuki static int aligned_vread(char *buf, char *addr, unsigned long count) 2731d0107eb0SKAMEZAWA Hiroyuki { 2732d0107eb0SKAMEZAWA Hiroyuki struct page *p; 2733d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 2734d0107eb0SKAMEZAWA Hiroyuki 2735d0107eb0SKAMEZAWA Hiroyuki while (count) { 2736d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length; 2737d0107eb0SKAMEZAWA Hiroyuki 2738891c49abSAlexander Kuleshov offset = offset_in_page(addr); 2739d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset; 2740d0107eb0SKAMEZAWA Hiroyuki if (length > count) 2741d0107eb0SKAMEZAWA Hiroyuki length = count; 2742d0107eb0SKAMEZAWA Hiroyuki p = vmalloc_to_page(addr); 2743d0107eb0SKAMEZAWA Hiroyuki /* 2744d0107eb0SKAMEZAWA Hiroyuki * To do safe access to this _mapped_ area, we need a 2745d0107eb0SKAMEZAWA Hiroyuki * lock. But adding a lock here means that we need to add the 2746d0107eb0SKAMEZAWA Hiroyuki * overhead of vmalloc()/vfree() calls for this _debug_ 2747d0107eb0SKAMEZAWA Hiroyuki * interface, which is rarely used. Instead of that, we'll use 2748d0107eb0SKAMEZAWA Hiroyuki * kmap() and accept a small overhead in this access function.
2749d0107eb0SKAMEZAWA Hiroyuki */ 2750d0107eb0SKAMEZAWA Hiroyuki if (p) { 2751d0107eb0SKAMEZAWA Hiroyuki /* 2752d0107eb0SKAMEZAWA Hiroyuki * we can expect USER0 is not used (see vread/vwrite's 2753d0107eb0SKAMEZAWA Hiroyuki * function description) 2754d0107eb0SKAMEZAWA Hiroyuki */ 27559b04c5feSCong Wang void *map = kmap_atomic(p); 2756d0107eb0SKAMEZAWA Hiroyuki memcpy(buf, map + offset, length); 27579b04c5feSCong Wang kunmap_atomic(map); 2758d0107eb0SKAMEZAWA Hiroyuki } else 2759d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, length); 2760d0107eb0SKAMEZAWA Hiroyuki 2761d0107eb0SKAMEZAWA Hiroyuki addr += length; 2762d0107eb0SKAMEZAWA Hiroyuki buf += length; 2763d0107eb0SKAMEZAWA Hiroyuki copied += length; 2764d0107eb0SKAMEZAWA Hiroyuki count -= length; 2765d0107eb0SKAMEZAWA Hiroyuki } 2766d0107eb0SKAMEZAWA Hiroyuki return copied; 2767d0107eb0SKAMEZAWA Hiroyuki } 2768d0107eb0SKAMEZAWA Hiroyuki 2769d0107eb0SKAMEZAWA Hiroyuki static int aligned_vwrite(char *buf, char *addr, unsigned long count) 2770d0107eb0SKAMEZAWA Hiroyuki { 2771d0107eb0SKAMEZAWA Hiroyuki struct page *p; 2772d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 2773d0107eb0SKAMEZAWA Hiroyuki 2774d0107eb0SKAMEZAWA Hiroyuki while (count) { 2775d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length; 2776d0107eb0SKAMEZAWA Hiroyuki 2777891c49abSAlexander Kuleshov offset = offset_in_page(addr); 2778d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset; 2779d0107eb0SKAMEZAWA Hiroyuki if (length > count) 2780d0107eb0SKAMEZAWA Hiroyuki length = count; 2781d0107eb0SKAMEZAWA Hiroyuki p = vmalloc_to_page(addr); 2782d0107eb0SKAMEZAWA Hiroyuki /* 2783d0107eb0SKAMEZAWA Hiroyuki * To do safe access to this _mapped_ area, we need a 2784d0107eb0SKAMEZAWA Hiroyuki * lock. But adding a lock here means that we need to add the 2785d0107eb0SKAMEZAWA Hiroyuki * overhead of vmalloc()/vfree() calls for this _debug_ 2786d0107eb0SKAMEZAWA Hiroyuki * interface, which is rarely used. Instead of that, we'll use 2787d0107eb0SKAMEZAWA Hiroyuki * kmap() and accept a small overhead in this access function. 2788d0107eb0SKAMEZAWA Hiroyuki */ 2789d0107eb0SKAMEZAWA Hiroyuki if (p) { 2790d0107eb0SKAMEZAWA Hiroyuki /* 2791d0107eb0SKAMEZAWA Hiroyuki * we can expect USER0 is not used (see vread/vwrite's 2792d0107eb0SKAMEZAWA Hiroyuki * function description) 2793d0107eb0SKAMEZAWA Hiroyuki */ 27949b04c5feSCong Wang void *map = kmap_atomic(p); 2795d0107eb0SKAMEZAWA Hiroyuki memcpy(map + offset, buf, length); 27969b04c5feSCong Wang kunmap_atomic(map); 2797d0107eb0SKAMEZAWA Hiroyuki } 2798d0107eb0SKAMEZAWA Hiroyuki addr += length; 2799d0107eb0SKAMEZAWA Hiroyuki buf += length; 2800d0107eb0SKAMEZAWA Hiroyuki copied += length; 2801d0107eb0SKAMEZAWA Hiroyuki count -= length; 2802d0107eb0SKAMEZAWA Hiroyuki } 2803d0107eb0SKAMEZAWA Hiroyuki return copied; 2804d0107eb0SKAMEZAWA Hiroyuki } 2805d0107eb0SKAMEZAWA Hiroyuki 2806d0107eb0SKAMEZAWA Hiroyuki /** 2807d0107eb0SKAMEZAWA Hiroyuki * vread() - read vmalloc area in a safe way. 2808d0107eb0SKAMEZAWA Hiroyuki * @buf: buffer for reading data 2809d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address. 2810d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be read. 2811d0107eb0SKAMEZAWA Hiroyuki * 2812d0107eb0SKAMEZAWA Hiroyuki * This function checks that addr is a valid vmalloc'ed area, and 2813d0107eb0SKAMEZAWA Hiroyuki * copies data from that area to a given buffer. If the given memory range 2814d0107eb0SKAMEZAWA Hiroyuki * of [addr...addr+count) includes some valid address, data is copied to 2815d0107eb0SKAMEZAWA Hiroyuki * the proper area of @buf.
If there are memory holes, they'll be zero-filled. 2816d0107eb0SKAMEZAWA Hiroyuki * An IOREMAP area is treated as a memory hole and no copy is done. 2817d0107eb0SKAMEZAWA Hiroyuki * 2818d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't include any intersection with a live 2819a8e5202dSCong Wang * vm_struct area, returns 0. @buf should be a kernel buffer. 2820d0107eb0SKAMEZAWA Hiroyuki * 2821d0107eb0SKAMEZAWA Hiroyuki * Note: In usual ops, vread() is never necessary because the caller 2822d0107eb0SKAMEZAWA Hiroyuki * should know the vmalloc() area is valid and can use memcpy(). 2823d0107eb0SKAMEZAWA Hiroyuki * This is for routines which have to access the vmalloc area without 2824d0107eb0SKAMEZAWA Hiroyuki * any information, such as /dev/kmem. 2825a862f68aSMike Rapoport * 2826a862f68aSMike Rapoport * Return: number of bytes for which addr and buf should be increased 2827a862f68aSMike Rapoport * (same number as @count) or %0 if [addr...addr+count) doesn't 2828a862f68aSMike Rapoport * include any intersection with valid vmalloc area 2829d0107eb0SKAMEZAWA Hiroyuki */ 28301da177e4SLinus Torvalds long vread(char *buf, char *addr, unsigned long count) 28311da177e4SLinus Torvalds { 2832e81ce85fSJoonsoo Kim struct vmap_area *va; 2833e81ce85fSJoonsoo Kim struct vm_struct *vm; 28341da177e4SLinus Torvalds char *vaddr, *buf_start = buf; 2835d0107eb0SKAMEZAWA Hiroyuki unsigned long buflen = count; 28361da177e4SLinus Torvalds unsigned long n; 28371da177e4SLinus Torvalds 28381da177e4SLinus Torvalds /* Don't allow overflow */ 28391da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 28401da177e4SLinus Torvalds count = -(unsigned long) addr; 28411da177e4SLinus Torvalds 2842e81ce85fSJoonsoo Kim spin_lock(&vmap_area_lock); 2843e81ce85fSJoonsoo Kim list_for_each_entry(va, &vmap_area_list, list) { 2844e81ce85fSJoonsoo Kim if (!count) 2845e81ce85fSJoonsoo Kim break; 2846e81ce85fSJoonsoo Kim 2847e81ce85fSJoonsoo Kim if (!(va->flags & VM_VM_AREA)) 2848e81ce85fSJoonsoo Kim continue; 2849e81ce85fSJoonsoo Kim 2850e81ce85fSJoonsoo Kim vm = va->vm; 2851e81ce85fSJoonsoo Kim vaddr = (char *) vm->addr; 2852762216abSWanpeng Li if (addr >= vaddr + get_vm_area_size(vm)) 28531da177e4SLinus Torvalds continue; 28541da177e4SLinus Torvalds while (addr < vaddr) { 28551da177e4SLinus Torvalds if (count == 0) 28561da177e4SLinus Torvalds goto finished; 28571da177e4SLinus Torvalds *buf = '\0'; 28581da177e4SLinus Torvalds buf++; 28591da177e4SLinus Torvalds addr++; 28601da177e4SLinus Torvalds count--; 28611da177e4SLinus Torvalds } 2862762216abSWanpeng Li n = vaddr + get_vm_area_size(vm) - addr; 2863d0107eb0SKAMEZAWA Hiroyuki if (n > count) 2864d0107eb0SKAMEZAWA Hiroyuki n = count; 2865e81ce85fSJoonsoo Kim if (!(vm->flags & VM_IOREMAP)) 2866d0107eb0SKAMEZAWA Hiroyuki aligned_vread(buf, addr, n); 2867d0107eb0SKAMEZAWA Hiroyuki else /* IOREMAP area is treated as memory hole */ 2868d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, n); 2869d0107eb0SKAMEZAWA Hiroyuki buf += n; 2870d0107eb0SKAMEZAWA Hiroyuki addr += n; 2871d0107eb0SKAMEZAWA Hiroyuki count -= n; 28721da177e4SLinus Torvalds } 28731da177e4SLinus Torvalds finished: 2874e81ce85fSJoonsoo Kim spin_unlock(&vmap_area_lock); 2875d0107eb0SKAMEZAWA Hiroyuki 2876d0107eb0SKAMEZAWA Hiroyuki if (buf == buf_start) 2877d0107eb0SKAMEZAWA Hiroyuki return 0; 2878d0107eb0SKAMEZAWA Hiroyuki /* zero-fill memory holes */ 2879d0107eb0SKAMEZAWA Hiroyuki if (buf != buf_start + buflen) 2880d0107eb0SKAMEZAWA Hiroyuki memset(buf, 0, buflen - (buf - buf_start)); 2881d0107eb0SKAMEZAWA Hiroyuki 2882d0107eb0SKAMEZAWA
Hiroyuki return buflen; 28831da177e4SLinus Torvalds } 28841da177e4SLinus Torvalds 2885d0107eb0SKAMEZAWA Hiroyuki /** 2886d0107eb0SKAMEZAWA Hiroyuki * vwrite() - write vmalloc area in a safe way. 2887d0107eb0SKAMEZAWA Hiroyuki * @buf: buffer for source data 2888d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address. 2889d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be written. 2890d0107eb0SKAMEZAWA Hiroyuki * 2891d0107eb0SKAMEZAWA Hiroyuki * This function checks that addr is a valid vmalloc'ed area, and 2892d0107eb0SKAMEZAWA Hiroyuki * copies data from a buffer to the given addr. If the specified range of 2893d0107eb0SKAMEZAWA Hiroyuki * [addr...addr+count) includes some valid address, data is copied from 2894d0107eb0SKAMEZAWA Hiroyuki * the proper area of @buf. If there are memory holes, nothing is copied to them. 2895d0107eb0SKAMEZAWA Hiroyuki * An IOREMAP area is treated as a memory hole and no copy is done. 2896d0107eb0SKAMEZAWA Hiroyuki * 2897d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't include any intersection with a live 2898a8e5202dSCong Wang * vm_struct area, returns 0. @buf should be a kernel buffer. 2899d0107eb0SKAMEZAWA Hiroyuki * 2900d0107eb0SKAMEZAWA Hiroyuki * Note: In usual ops, vwrite() is never necessary because the caller 2901d0107eb0SKAMEZAWA Hiroyuki * should know the vmalloc() area is valid and can use memcpy(). 2902d0107eb0SKAMEZAWA Hiroyuki * This is for routines which have to access the vmalloc area without 2903d0107eb0SKAMEZAWA Hiroyuki * any information, such as /dev/kmem. 2904a862f68aSMike Rapoport * 2905a862f68aSMike Rapoport * Return: number of bytes for which addr and buf should be 2906a862f68aSMike Rapoport * increased (same number as @count) or %0 if [addr...addr+count) 2907a862f68aSMike Rapoport * doesn't include any intersection with valid vmalloc area 2908d0107eb0SKAMEZAWA Hiroyuki */ 29091da177e4SLinus Torvalds long vwrite(char *buf, char *addr, unsigned long count) 29101da177e4SLinus Torvalds { 2911e81ce85fSJoonsoo Kim struct vmap_area *va; 2912e81ce85fSJoonsoo Kim struct vm_struct *vm; 2913d0107eb0SKAMEZAWA Hiroyuki char *vaddr; 2914d0107eb0SKAMEZAWA Hiroyuki unsigned long n, buflen; 2915d0107eb0SKAMEZAWA Hiroyuki int copied = 0; 29161da177e4SLinus Torvalds 29171da177e4SLinus Torvalds /* Don't allow overflow */ 29181da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 29191da177e4SLinus Torvalds count = -(unsigned long) addr; 2920d0107eb0SKAMEZAWA Hiroyuki buflen = count; 29211da177e4SLinus Torvalds 2922e81ce85fSJoonsoo Kim spin_lock(&vmap_area_lock); 2923e81ce85fSJoonsoo Kim list_for_each_entry(va, &vmap_area_list, list) { 2924e81ce85fSJoonsoo Kim if (!count) 2925e81ce85fSJoonsoo Kim break; 2926e81ce85fSJoonsoo Kim 2927e81ce85fSJoonsoo Kim if (!(va->flags & VM_VM_AREA)) 2928e81ce85fSJoonsoo Kim continue; 2929e81ce85fSJoonsoo Kim 2930e81ce85fSJoonsoo Kim vm = va->vm; 2931e81ce85fSJoonsoo Kim vaddr = (char *) vm->addr; 2932762216abSWanpeng Li if (addr >= vaddr + get_vm_area_size(vm)) 29331da177e4SLinus Torvalds continue; 29341da177e4SLinus Torvalds while (addr < vaddr) { 29351da177e4SLinus Torvalds if (count == 0) 29361da177e4SLinus Torvalds goto finished; 29371da177e4SLinus Torvalds buf++; 29381da177e4SLinus Torvalds addr++; 29391da177e4SLinus Torvalds count--; 29401da177e4SLinus Torvalds } 2941762216abSWanpeng Li n = vaddr + get_vm_area_size(vm) - addr; 2942d0107eb0SKAMEZAWA Hiroyuki if (n > count) 2943d0107eb0SKAMEZAWA Hiroyuki n = count; 2944e81ce85fSJoonsoo Kim if (!(vm->flags & VM_IOREMAP)) { 2945d0107eb0SKAMEZAWA Hiroyuki aligned_vwrite(buf, addr, n);
2946d0107eb0SKAMEZAWA Hiroyuki copied++; 2947d0107eb0SKAMEZAWA Hiroyuki } 2948d0107eb0SKAMEZAWA Hiroyuki buf += n; 2949d0107eb0SKAMEZAWA Hiroyuki addr += n; 2950d0107eb0SKAMEZAWA Hiroyuki count -= n; 29511da177e4SLinus Torvalds } 29521da177e4SLinus Torvalds finished: 2953e81ce85fSJoonsoo Kim spin_unlock(&vmap_area_lock); 2954d0107eb0SKAMEZAWA Hiroyuki if (!copied) 2955d0107eb0SKAMEZAWA Hiroyuki return 0; 2956d0107eb0SKAMEZAWA Hiroyuki return buflen; 29571da177e4SLinus Torvalds } 295883342314SNick Piggin 295983342314SNick Piggin /** 2960e69e9d4aSHATAYAMA Daisuke * remap_vmalloc_range_partial - map vmalloc pages to userspace 2961e69e9d4aSHATAYAMA Daisuke * @vma: vma to cover 2962e69e9d4aSHATAYAMA Daisuke * @uaddr: target user address to start at 2963e69e9d4aSHATAYAMA Daisuke * @kaddr: virtual address of vmalloc kernel memory 2964e69e9d4aSHATAYAMA Daisuke * @size: size of map area 2965e69e9d4aSHATAYAMA Daisuke * 2966e69e9d4aSHATAYAMA Daisuke * Returns: 0 for success, -Exxx on failure 2967e69e9d4aSHATAYAMA Daisuke * 2968e69e9d4aSHATAYAMA Daisuke * This function checks that @kaddr is a valid vmalloc'ed area, 2969e69e9d4aSHATAYAMA Daisuke * and that it is big enough to cover the range starting at 2970e69e9d4aSHATAYAMA Daisuke * @uaddr in @vma. Will return failure if those criteria aren't 2971e69e9d4aSHATAYAMA Daisuke * met. 2972e69e9d4aSHATAYAMA Daisuke * 2973e69e9d4aSHATAYAMA Daisuke * Similar to remap_pfn_range() (see mm/memory.c) 2974e69e9d4aSHATAYAMA Daisuke */ 2975e69e9d4aSHATAYAMA Daisuke int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, 2976e69e9d4aSHATAYAMA Daisuke void *kaddr, unsigned long size) 2977e69e9d4aSHATAYAMA Daisuke { 2978e69e9d4aSHATAYAMA Daisuke struct vm_struct *area; 2979e69e9d4aSHATAYAMA Daisuke 2980e69e9d4aSHATAYAMA Daisuke size = PAGE_ALIGN(size); 2981e69e9d4aSHATAYAMA Daisuke 2982e69e9d4aSHATAYAMA Daisuke if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) 2983e69e9d4aSHATAYAMA Daisuke return -EINVAL; 2984e69e9d4aSHATAYAMA Daisuke 2985e69e9d4aSHATAYAMA Daisuke area = find_vm_area(kaddr); 2986e69e9d4aSHATAYAMA Daisuke if (!area) 2987e69e9d4aSHATAYAMA Daisuke return -EINVAL; 2988e69e9d4aSHATAYAMA Daisuke 2989e69e9d4aSHATAYAMA Daisuke if (!(area->flags & VM_USERMAP)) 2990e69e9d4aSHATAYAMA Daisuke return -EINVAL; 2991e69e9d4aSHATAYAMA Daisuke 2992401592d2SRoman Penyaev if (kaddr + size > area->addr + get_vm_area_size(area)) 2993e69e9d4aSHATAYAMA Daisuke return -EINVAL; 2994e69e9d4aSHATAYAMA Daisuke 2995e69e9d4aSHATAYAMA Daisuke do { 2996e69e9d4aSHATAYAMA Daisuke struct page *page = vmalloc_to_page(kaddr); 2997e69e9d4aSHATAYAMA Daisuke int ret; 2998e69e9d4aSHATAYAMA Daisuke 2999e69e9d4aSHATAYAMA Daisuke ret = vm_insert_page(vma, uaddr, page); 3000e69e9d4aSHATAYAMA Daisuke if (ret) 3001e69e9d4aSHATAYAMA Daisuke return ret; 3002e69e9d4aSHATAYAMA Daisuke 3003e69e9d4aSHATAYAMA Daisuke uaddr += PAGE_SIZE; 3004e69e9d4aSHATAYAMA Daisuke kaddr += PAGE_SIZE; 3005e69e9d4aSHATAYAMA Daisuke size -= PAGE_SIZE; 3006e69e9d4aSHATAYAMA Daisuke } while (size > 0); 3007e69e9d4aSHATAYAMA Daisuke 3008e69e9d4aSHATAYAMA Daisuke vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 3009e69e9d4aSHATAYAMA Daisuke 3010e69e9d4aSHATAYAMA Daisuke return 0; 3011e69e9d4aSHATAYAMA Daisuke } 3012e69e9d4aSHATAYAMA Daisuke EXPORT_SYMBOL(remap_vmalloc_range_partial); 3013e69e9d4aSHATAYAMA Daisuke 3014e69e9d4aSHATAYAMA Daisuke /** 301583342314SNick Piggin * remap_vmalloc_range - map vmalloc pages to userspace 301683342314SNick Piggin * @vma: vma to cover (map full range of vma)
301783342314SNick Piggin * @addr: vmalloc memory 301883342314SNick Piggin * @pgoff: number of pages into addr before first page to map 30197682486bSRandy Dunlap * 30207682486bSRandy Dunlap * Returns: 0 for success, -Exxx on failure 302183342314SNick Piggin * 302283342314SNick Piggin * This function checks that addr is a valid vmalloc'ed area, and 302383342314SNick Piggin * that it is big enough to cover the vma. Will return failure if 302483342314SNick Piggin * those criteria aren't met. 302583342314SNick Piggin * 302672fd4a35SRobert P. J. Day * Similar to remap_pfn_range() (see mm/memory.c) 302783342314SNick Piggin */ 302883342314SNick Piggin int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 302983342314SNick Piggin unsigned long pgoff) 303083342314SNick Piggin { 3031e69e9d4aSHATAYAMA Daisuke return remap_vmalloc_range_partial(vma, vma->vm_start, 3032e69e9d4aSHATAYAMA Daisuke addr + (pgoff << PAGE_SHIFT), 3033e69e9d4aSHATAYAMA Daisuke vma->vm_end - vma->vm_start); 303483342314SNick Piggin } 303583342314SNick Piggin EXPORT_SYMBOL(remap_vmalloc_range); 303683342314SNick Piggin 30371eeb66a1SChristoph Hellwig /* 30381eeb66a1SChristoph Hellwig * Implement a stub for vmalloc_sync_all() if the architecture chose not to 30391eeb66a1SChristoph Hellwig * have one. 30401eeb66a1SChristoph Hellwig */ 30413b32123dSGideon Israel Dsouza void __weak vmalloc_sync_all(void) 30421eeb66a1SChristoph Hellwig { 30431eeb66a1SChristoph Hellwig } 30445f4352fbSJeremy Fitzhardinge 30455f4352fbSJeremy Fitzhardinge 30468b1e0f81SAnshuman Khandual static int f(pte_t *pte, unsigned long addr, void *data) 30475f4352fbSJeremy Fitzhardinge { 3048cd12909cSDavid Vrabel pte_t ***p = data; 3049cd12909cSDavid Vrabel 3050cd12909cSDavid Vrabel if (p) { 3051cd12909cSDavid Vrabel *(*p) = pte; 3052cd12909cSDavid Vrabel (*p)++; 3053cd12909cSDavid Vrabel } 30545f4352fbSJeremy Fitzhardinge return 0; 30555f4352fbSJeremy Fitzhardinge } 30565f4352fbSJeremy Fitzhardinge 30575f4352fbSJeremy Fitzhardinge /** 30585f4352fbSJeremy Fitzhardinge * alloc_vm_area - allocate a range of kernel address space 30595f4352fbSJeremy Fitzhardinge * @size: size of the area 3060cd12909cSDavid Vrabel * @ptes: returns the PTEs for the address space 30617682486bSRandy Dunlap * 30627682486bSRandy Dunlap * Returns: NULL on failure, vm_struct on success 30635f4352fbSJeremy Fitzhardinge * 30645f4352fbSJeremy Fitzhardinge * This function reserves a range of kernel address space, and 30655f4352fbSJeremy Fitzhardinge * allocates pagetables to map that range. No actual mappings 3066cd12909cSDavid Vrabel * are created. 3067cd12909cSDavid Vrabel * 3068cd12909cSDavid Vrabel * If @ptes is non-NULL, pointers to the PTEs (in init_mm) 3069cd12909cSDavid Vrabel * allocated for the VM area are returned. 30705f4352fbSJeremy Fitzhardinge */ 3071cd12909cSDavid Vrabel struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) 30725f4352fbSJeremy Fitzhardinge { 30735f4352fbSJeremy Fitzhardinge struct vm_struct *area; 30745f4352fbSJeremy Fitzhardinge 307523016969SChristoph Lameter area = get_vm_area_caller(size, VM_IOREMAP, 307623016969SChristoph Lameter __builtin_return_address(0)); 30775f4352fbSJeremy Fitzhardinge if (area == NULL) 30785f4352fbSJeremy Fitzhardinge return NULL; 30795f4352fbSJeremy Fitzhardinge 30805f4352fbSJeremy Fitzhardinge /* 30815f4352fbSJeremy Fitzhardinge * This ensures that page tables are constructed for this region 30825f4352fbSJeremy Fitzhardinge * of kernel virtual address space and mapped into init_mm.
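 * If @ptes was supplied, apply_to_page_range() invokes f() on every PTE
 * in the range; f() stores each pte_t pointer through the cursor passed
 * as &ptes and advances it, which is how the caller gets the PTE
 * pointers back. With a NULL @ptes the callback does nothing and only
 * the side effect of populating the page tables remains.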
30835f4352fbSJeremy Fitzhardinge */ 30845f4352fbSJeremy Fitzhardinge if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 3085cd12909cSDavid Vrabel size, f, ptes ? &ptes : NULL)) { 30865f4352fbSJeremy Fitzhardinge free_vm_area(area); 30875f4352fbSJeremy Fitzhardinge return NULL; 30885f4352fbSJeremy Fitzhardinge } 30895f4352fbSJeremy Fitzhardinge 30905f4352fbSJeremy Fitzhardinge return area; 30915f4352fbSJeremy Fitzhardinge } 30925f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(alloc_vm_area); 30935f4352fbSJeremy Fitzhardinge 30945f4352fbSJeremy Fitzhardinge void free_vm_area(struct vm_struct *area) 30955f4352fbSJeremy Fitzhardinge { 30965f4352fbSJeremy Fitzhardinge struct vm_struct *ret; 30975f4352fbSJeremy Fitzhardinge ret = remove_vm_area(area->addr); 30985f4352fbSJeremy Fitzhardinge BUG_ON(ret != area); 30995f4352fbSJeremy Fitzhardinge kfree(area); 31005f4352fbSJeremy Fitzhardinge } 31015f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(free_vm_area); 3102a10aa579SChristoph Lameter 31034f8b02b4STejun Heo #ifdef CONFIG_SMP 3104ca23e405STejun Heo static struct vmap_area *node_to_va(struct rb_node *n) 3105ca23e405STejun Heo { 31064583e773SGeliang Tang return rb_entry_safe(n, struct vmap_area, rb_node); 3107ca23e405STejun Heo } 3108ca23e405STejun Heo 3109ca23e405STejun Heo /** 311068ad4a33SUladzislau Rezki (Sony) * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to 311168ad4a33SUladzislau Rezki (Sony) * @addr: target address 3112ca23e405STejun Heo * 311368ad4a33SUladzislau Rezki (Sony) * Returns: the vmap_area if it is found. If there is no such area, 311468ad4a33SUladzislau Rezki (Sony) * the first highest (in reverse order) vmap_area is returned, 311568ad4a33SUladzislau Rezki (Sony) * i.e. one with va->va_start < addr && va->va_end < addr, or NULL 311668ad4a33SUladzislau Rezki (Sony) * if there are no areas before @addr. 3117ca23e405STejun Heo */ 311868ad4a33SUladzislau Rezki (Sony) static struct vmap_area * 311968ad4a33SUladzislau Rezki (Sony) pvm_find_va_enclose_addr(unsigned long addr) 3120ca23e405STejun Heo { 312168ad4a33SUladzislau Rezki (Sony) struct vmap_area *va, *tmp; 312268ad4a33SUladzislau Rezki (Sony) struct rb_node *n; 312368ad4a33SUladzislau Rezki (Sony) 312468ad4a33SUladzislau Rezki (Sony) n = free_vmap_area_root.rb_node; 312568ad4a33SUladzislau Rezki (Sony) va = NULL; 3126ca23e405STejun Heo 3127ca23e405STejun Heo while (n) { 312868ad4a33SUladzislau Rezki (Sony) tmp = rb_entry(n, struct vmap_area, rb_node); 312968ad4a33SUladzislau Rezki (Sony) if (tmp->va_start <= addr) { 313068ad4a33SUladzislau Rezki (Sony) va = tmp; 313168ad4a33SUladzislau Rezki (Sony) if (tmp->va_end >= addr) 3132ca23e405STejun Heo break; 3133ca23e405STejun Heo 313468ad4a33SUladzislau Rezki (Sony) n = n->rb_right; 3135ca23e405STejun Heo } else { 313668ad4a33SUladzislau Rezki (Sony) n = n->rb_left; 3137ca23e405STejun Heo } 313868ad4a33SUladzislau Rezki (Sony) } 313968ad4a33SUladzislau Rezki (Sony) 314068ad4a33SUladzislau Rezki (Sony) return va; 3141ca23e405STejun Heo } 3142ca23e405STejun Heo 3143ca23e405STejun Heo /** 314468ad4a33SUladzislau Rezki (Sony) * pvm_determine_end_from_reverse - find the highest aligned address 314568ad4a33SUladzislau Rezki (Sony) * of free block below VMALLOC_END 314668ad4a33SUladzislau Rezki (Sony) * @va: 314768ad4a33SUladzislau Rezki (Sony) * in - the VA we start the search from (reverse order); 314868ad4a33SUladzislau Rezki (Sony) * out - the VA with the highest aligned end address.
3149ca23e405STejun Heo * 315068ad4a33SUladzislau Rezki (Sony) * Returns: determined end address within vmap_area 3151ca23e405STejun Heo */ 315268ad4a33SUladzislau Rezki (Sony) static unsigned long 315368ad4a33SUladzislau Rezki (Sony) pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) 3154ca23e405STejun Heo { 315568ad4a33SUladzislau Rezki (Sony) unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 3156ca23e405STejun Heo unsigned long addr; 3157ca23e405STejun Heo 315868ad4a33SUladzislau Rezki (Sony) if (likely(*va)) { 315968ad4a33SUladzislau Rezki (Sony) list_for_each_entry_from_reverse((*va), 316068ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list, list) { 316168ad4a33SUladzislau Rezki (Sony) addr = min((*va)->va_end & ~(align - 1), vmalloc_end); 316268ad4a33SUladzislau Rezki (Sony) if ((*va)->va_start < addr) 316368ad4a33SUladzislau Rezki (Sony) return addr; 316468ad4a33SUladzislau Rezki (Sony) } 3165ca23e405STejun Heo } 3166ca23e405STejun Heo 316768ad4a33SUladzislau Rezki (Sony) return 0; 3168ca23e405STejun Heo } 3169ca23e405STejun Heo 3170ca23e405STejun Heo /** 3171ca23e405STejun Heo * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator 3172ca23e405STejun Heo * @offsets: array containing offset of each area 3173ca23e405STejun Heo * @sizes: array containing size of each area 3174ca23e405STejun Heo * @nr_vms: the number of areas to allocate 3175ca23e405STejun Heo * @align: alignment, all entries in @offsets and @sizes must be aligned to this 3176ca23e405STejun Heo * 3177ca23e405STejun Heo * Returns: kmalloc'd vm_struct pointer array pointing to allocated 3178ca23e405STejun Heo * vm_structs on success, %NULL on failure 3179ca23e405STejun Heo * 3180ca23e405STejun Heo * Percpu allocator wants to use congruent vm areas so that it can 3181ca23e405STejun Heo * maintain the offsets among percpu areas. This function allocates 3182ec3f64fcSDavid Rientjes * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to 3183ec3f64fcSDavid Rientjes * be scattered pretty far, with the distance between two areas easily going up 3184ec3f64fcSDavid Rientjes * to gigabytes. To avoid interacting with regular vmallocs, these 3185ec3f64fcSDavid Rientjes * areas are allocated from the top. 3186ca23e405STejun Heo * 3187ca23e405STejun Heo * Despite its complicated look, this allocator is rather simple. It 318868ad4a33SUladzislau Rezki (Sony) * does everything top-down and scans free blocks from the end looking 318968ad4a33SUladzislau Rezki (Sony) * for a matching base. While scanning, if any of the areas does not fit, 319068ad4a33SUladzislau Rezki (Sony) * the base address is pulled down to fit it. Scanning is repeated till 319168ad4a33SUladzislau Rezki (Sony) * all the areas fit and then all necessary data structures are inserted 319268ad4a33SUladzislau Rezki (Sony) * and the result is returned.
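 *
 * Example (illustrative sketch only; in-tree the only caller is the
 * percpu allocator, and the values below are made up):
 *
 *	const unsigned long offsets[2] = { 0, 4 * PAGE_SIZE };
 *	const size_t sizes[2] = { PAGE_SIZE, PAGE_SIZE };
 *	struct vm_struct **vms;
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);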
3193ca23e405STejun Heo */ 3194ca23e405STejun Heo struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 3195ca23e405STejun Heo const size_t *sizes, int nr_vms, 3196ec3f64fcSDavid Rientjes size_t align) 3197ca23e405STejun Heo { 3198ca23e405STejun Heo const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); 3199ca23e405STejun Heo const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 320068ad4a33SUladzislau Rezki (Sony) struct vmap_area **vas, *va; 3201ca23e405STejun Heo struct vm_struct **vms; 3202ca23e405STejun Heo int area, area2, last_area, term_area; 320368ad4a33SUladzislau Rezki (Sony) unsigned long base, start, size, end, last_end; 3204ca23e405STejun Heo bool purged = false; 320568ad4a33SUladzislau Rezki (Sony) enum fit_type type; 3206ca23e405STejun Heo 3207ca23e405STejun Heo /* verify parameters and allocate data structures */ 3208891c49abSAlexander Kuleshov BUG_ON(offset_in_page(align) || !is_power_of_2(align)); 3209ca23e405STejun Heo for (last_area = 0, area = 0; area < nr_vms; area++) { 3210ca23e405STejun Heo start = offsets[area]; 3211ca23e405STejun Heo end = start + sizes[area]; 3212ca23e405STejun Heo 3213ca23e405STejun Heo /* is everything aligned properly? */ 3214ca23e405STejun Heo BUG_ON(!IS_ALIGNED(offsets[area], align)); 3215ca23e405STejun Heo BUG_ON(!IS_ALIGNED(sizes[area], align)); 3216ca23e405STejun Heo 3217ca23e405STejun Heo /* detect the area with the highest address */ 3218ca23e405STejun Heo if (start > offsets[last_area]) 3219ca23e405STejun Heo last_area = area; 3220ca23e405STejun Heo 3221c568da28SWei Yang for (area2 = area + 1; area2 < nr_vms; area2++) { 3222ca23e405STejun Heo unsigned long start2 = offsets[area2]; 3223ca23e405STejun Heo unsigned long end2 = start2 + sizes[area2]; 3224ca23e405STejun Heo 3225c568da28SWei Yang BUG_ON(start2 < end && start < end2); 3226ca23e405STejun Heo } 3227ca23e405STejun Heo } 3228ca23e405STejun Heo last_end = offsets[last_area] + sizes[last_area]; 3229ca23e405STejun Heo 3230ca23e405STejun Heo if (vmalloc_end - vmalloc_start < last_end) { 3231ca23e405STejun Heo WARN_ON(true); 3232ca23e405STejun Heo return NULL; 3233ca23e405STejun Heo } 3234ca23e405STejun Heo 32354d67d860SThomas Meyer vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); 32364d67d860SThomas Meyer vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); 3237ca23e405STejun Heo if (!vas || !vms) 3238f1db7afdSKautuk Consul goto err_free2; 3239ca23e405STejun Heo 3240ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 324168ad4a33SUladzislau Rezki (Sony) vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); 3242ec3f64fcSDavid Rientjes vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); 3243ca23e405STejun Heo if (!vas[area] || !vms[area]) 3244ca23e405STejun Heo goto err_free; 3245ca23e405STejun Heo } 3246ca23e405STejun Heo retry: 3247ca23e405STejun Heo spin_lock(&vmap_area_lock); 3248ca23e405STejun Heo 3249ca23e405STejun Heo /* start scanning - we scan from the top, begin with the last area */ 3250ca23e405STejun Heo area = term_area = last_area; 3251ca23e405STejun Heo start = offsets[area]; 3252ca23e405STejun Heo end = start + sizes[area]; 3253ca23e405STejun Heo 325468ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(vmalloc_end); 325568ad4a33SUladzislau Rezki (Sony) base = pvm_determine_end_from_reverse(&va, align) - end; 3256ca23e405STejun Heo 3257ca23e405STejun Heo while (true) { 3258ca23e405STejun Heo /* 3259ca23e405STejun Heo * base might have underflowed, add last_end before 3260ca23e405STejun Heo * comparing. 
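 * (base is unsigned, so adding last_end to both sides keeps the
 * comparison meaningful even when base has wrapped around; it is an
 * overflow-safe way of testing base < vmalloc_start.)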
		 */
		if (base + last_end < vmalloc_start + last_end)
			goto overflow;

		/*
		 * A fitting base has not been found.
		 */
		if (va == NULL)
			goto overflow;

		/*
		 * If this VA does not fit, move base downwards and recheck.
		 */
		if (base + start < va->va_start || base + end > va->va_end) {
			va = node_to_va(rb_prev(&va->rb_node));
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one. If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;

		start = offsets[area];
		end = start + sizes[area];
		va = pvm_find_va_enclose_addr(base + end);
	}

	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		int ret;

		start = base + offsets[area];
		size = sizes[area];

		va = pvm_find_va_enclose_addr(start);
		if (WARN_ON_ONCE(va == NULL))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		type = classify_va_fit_type(va, start, size);
		if (WARN_ON_ONCE(type == NOTHING_FIT))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		ret = adjust_va_to_fit_type(va, start, size, type);
		if (unlikely(ret))
			goto recovery;

		/* Allocated area.
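		 * Hand the carved-out range to the vmap_area that was
		 * preallocated before taking vmap_area_lock;
		 * adjust_va_to_fit_type() has already trimmed that
		 * range out of the free tree.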
		 */
		va = vas[area];
		va->va_start = start;
		va->va_end = start + size;

		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
	}

	spin_unlock(&vmap_area_lock);

	/* insert all vm's */
	for (area = 0; area < nr_vms; area++)
		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
				 pcpu_get_vm_areas);

	kfree(vas);
	return vms;

recovery:
	/* Remove previously inserted areas. */
	while (area--) {
		__free_vmap_area(vas[area]);
		vas[area] = NULL;
	}

overflow:
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = true;

		/* Before "retry", check if we recovered. */
		for (area = 0; area < nr_vms; area++) {
			if (vas[area])
				continue;

			vas[area] = kmem_cache_zalloc(
				vmap_area_cachep, GFP_KERNEL);
			if (!vas[area])
				goto err_free;
		}

		goto retry;
	}

err_free:
	for (area = 0; area < nr_vms; area++) {
		if (vas[area])
			kmem_cache_free(vmap_area_cachep, vas[area]);

		kfree(vms[area]);
	}
err_free2:
	kfree(vas);
	kfree(vms);
	return NULL;
}

/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
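 *
 * A minimal usage sketch (the names pcpu_offsets, pcpu_sizes,
 * nr_groups and group_align are hypothetical, not taken from this
 * file):
 *
 *	vms = pcpu_get_vm_areas(pcpu_offsets, pcpu_sizes,
 *				nr_groups, group_align);
 *	if (!vms)
 *		return -ENOMEM;
 *	...map pages into the returned areas...
 *	pcpu_free_vm_areas(vms, nr_groups);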
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
#endif	/* CONFIG_SMP */

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmap_area_lock)
{
	spin_lock(&vmap_area_lock);
	return seq_list_start(&vmap_area_list, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &vmap_area_list, pos);
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmap_area_lock)
{
	spin_unlock(&vmap_area_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (IS_ENABLED(CONFIG_NUMA)) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		if (v->flags & VM_UNINITIALIZED)
			return;
		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
		smp_rmb();

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vmap_area *va;
	struct vm_struct *v;

	va = list_entry(p, struct vmap_area, list);

	/*
	 * s_show() can race with remove_vm_area(): if VM_VM_AREA is
	 * clear, the vmap area is either being torn down or belongs
	 * to a vm_map_ram allocation.
	 */
	if (!(va->flags & VM_VM_AREA)) {
		seq_printf(m, "0x%pK-0x%pK %7ld %s\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start,
			va->flags & VM_LAZY_FREE ?
"unpurged vm_area" : "vm_map_ram"); 344978c72746SYisheng Xie 3450d4033afdSJoonsoo Kim return 0; 345178c72746SYisheng Xie } 3452d4033afdSJoonsoo Kim 3453d4033afdSJoonsoo Kim v = va->vm; 3454a10aa579SChristoph Lameter 345545ec1690SKees Cook seq_printf(m, "0x%pK-0x%pK %7ld", 3456a10aa579SChristoph Lameter v->addr, v->addr + v->size, v->size); 3457a10aa579SChristoph Lameter 345862c70bceSJoe Perches if (v->caller) 345962c70bceSJoe Perches seq_printf(m, " %pS", v->caller); 346023016969SChristoph Lameter 3461a10aa579SChristoph Lameter if (v->nr_pages) 3462a10aa579SChristoph Lameter seq_printf(m, " pages=%d", v->nr_pages); 3463a10aa579SChristoph Lameter 3464a10aa579SChristoph Lameter if (v->phys_addr) 3465199eaa05SMiles Chen seq_printf(m, " phys=%pa", &v->phys_addr); 3466a10aa579SChristoph Lameter 3467a10aa579SChristoph Lameter if (v->flags & VM_IOREMAP) 3468f4527c90SFabian Frederick seq_puts(m, " ioremap"); 3469a10aa579SChristoph Lameter 3470a10aa579SChristoph Lameter if (v->flags & VM_ALLOC) 3471f4527c90SFabian Frederick seq_puts(m, " vmalloc"); 3472a10aa579SChristoph Lameter 3473a10aa579SChristoph Lameter if (v->flags & VM_MAP) 3474f4527c90SFabian Frederick seq_puts(m, " vmap"); 3475a10aa579SChristoph Lameter 3476a10aa579SChristoph Lameter if (v->flags & VM_USERMAP) 3477f4527c90SFabian Frederick seq_puts(m, " user"); 3478a10aa579SChristoph Lameter 3479244d63eeSDavid Rientjes if (is_vmalloc_addr(v->pages)) 3480f4527c90SFabian Frederick seq_puts(m, " vpages"); 3481a10aa579SChristoph Lameter 3482a47a126aSEric Dumazet show_numa_info(m, v); 3483a10aa579SChristoph Lameter seq_putc(m, '\n'); 3484a10aa579SChristoph Lameter return 0; 3485a10aa579SChristoph Lameter } 3486a10aa579SChristoph Lameter 34875f6a6a9cSAlexey Dobriyan static const struct seq_operations vmalloc_op = { 3488a10aa579SChristoph Lameter .start = s_start, 3489a10aa579SChristoph Lameter .next = s_next, 3490a10aa579SChristoph Lameter .stop = s_stop, 3491a10aa579SChristoph Lameter .show = s_show, 3492a10aa579SChristoph Lameter }; 34935f6a6a9cSAlexey Dobriyan 34945f6a6a9cSAlexey Dobriyan static int __init proc_vmalloc_init(void) 34955f6a6a9cSAlexey Dobriyan { 3496fddda2b7SChristoph Hellwig if (IS_ENABLED(CONFIG_NUMA)) 34970825a6f9SJoe Perches proc_create_seq_private("vmallocinfo", 0400, NULL, 349844414d82SChristoph Hellwig &vmalloc_op, 349944414d82SChristoph Hellwig nr_node_ids * sizeof(unsigned int), NULL); 3500fddda2b7SChristoph Hellwig else 35010825a6f9SJoe Perches proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op); 35025f6a6a9cSAlexey Dobriyan return 0; 35035f6a6a9cSAlexey Dobriyan } 35045f6a6a9cSAlexey Dobriyan module_init(proc_vmalloc_init); 3505db3808c1SJoonsoo Kim 3506a10aa579SChristoph Lameter #endif 3507