// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/uio.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vmalloc.h>

#include "internal.h"
#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
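
/*
 * Example: both knobs above are consumed via early_param(), i.e. they are
 * plain kernel command-line flags. Booting with "nohugeiomap nohugevmalloc"
 * caps ioremap_max_page_shift at PAGE_SHIFT and clears vmap_allow_huge, so
 * ioremap() and vmalloc() fall back to base-page (PAGE_SIZE) mappings even
 * on architectures that support huge vmap mappings.
 */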

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	unsigned long size = PAGE_SIZE;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(ptep_get(pte)));

#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
		if (size != PAGE_SIZE) {
			pte_t entry = pfn_pte(pfn, prot);

			entry = arch_make_huge_pte(entry, ilog2(size), 0);
			set_huge_pte_at(&init_mm, addr, pte, entry, size);
			pfn += PFN_DOWN(size);
			continue;
		}
#endif
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte += PFN_DOWN(size), addr += size, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PMD_SHIFT)
		return 0;

	if (!arch_vmap_pmd_supported(prot))
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PUD_SHIFT)
		return 0;

	if (!arch_vmap_pud_supported(prot))
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < P4D_SHIFT)
		return 0;

	if (!arch_vmap_p4d_supported(prot))
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_range_noflush(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
					max_page_shift, &mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

int vmap_page_range(unsigned long addr, unsigned long end,
		    phys_addr_t phys_addr, pgprot_t prot)
{
	int err;

	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
				 ioremap_max_page_shift);
	flush_cache_vmap(addr, end);
	if (!err)
		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
					       ioremap_max_page_shift);
	return err;
}

int ioremap_page_range(unsigned long addr, unsigned long end,
		phys_addr_t phys_addr, pgprot_t prot)
{
	struct vm_struct *area;

	area = find_vm_area((void *)addr);
	if (!area || !(area->flags & VM_IOREMAP)) {
		WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr);
		return -EINVAL;
	}
	if (addr != (unsigned long)area->addr ||
	    (void *)end != area->addr + get_vm_area_size(area)) {
		WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n",
			  addr, end, (long)area->addr,
			  (long)area->addr + get_vm_area_size(area));
		return -ERANGE;
	}
	return vmap_page_range(addr, end, phys_addr, prot);
}
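
/*
 * Example (illustrative sketch, not part of this file): ioremap_page_range()
 * is normally reached through the ioremap() family rather than called
 * directly. A driver mapping a PCI BAR would do something like:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + CTRL_REG_OFFSET);	// CTRL_REG_OFFSET is made up
 *	iounmap(regs);
 *
 * ioremap() allocates a VM_IOREMAP vm_struct and then uses the function
 * above to install the page tables for it.
 */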

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared)
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);

		cond_resched();
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared)
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		p4d_clear_huge(p4d);
		if (p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}

/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vunmap() before calling
 * this function, and flush_tlb_kernel_range after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
void __vunmap_range_noflush(unsigned long start, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}

void vunmap_range_noflush(unsigned long start, unsigned long end)
{
	kmsan_vunmap_range_noflush(start, end);
	__vunmap_range_noflush(start, end);
}

/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void vunmap_range(unsigned long addr, unsigned long end)
{
	flush_cache_vunmap(addr, end);
	vunmap_range_noflush(addr, end);
	flush_tlb_kernel_range(addr, end);
}
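
/*
 * Example (illustrative sketch): the usual way to exercise this unmap path
 * is through the vmap()/vunmap() pair, e.g. making a handful of physically
 * scattered pages virtually contiguous and tearing the mapping down again:
 *
 *	struct page *pages[4];
 *	void *va;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		pages[i] = alloc_page(GFP_KERNEL);
 *	va = vmap(pages, 4, VM_MAP, PAGE_KERNEL);
 *	if (va) {
 *		memset(va, 0, 4 * PAGE_SIZE);
 *		vunmap(va);	// ends up in the vunmap_range machinery above
 *	}
 *	for (i = 0; i < 4; i++)
 *		__free_page(pages[i]);
 *
 * Error handling for a failed alloc_page() is omitted for brevity.
 */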

static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(ptep_get(pte))))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		if (WARN_ON(!pfn_valid(page_to_pfn(page))))
			return -EINVAL;

		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return 0;
}

/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

	WARN_ON(page_shift < PAGE_SHIFT);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					page_to_phys(pages[i]), prot,
					page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}

	return 0;
}

int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
						 page_shift);

	if (ret)
		return ret;
	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
}

/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	flush_cache_vmap(addr, end);
	return err;
}

static int check_sparse_vm_area(struct vm_struct *area, unsigned long start,
				unsigned long end)
{
	might_sleep();
	if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS))
		return -EINVAL;
	if (WARN_ON_ONCE(area->flags & VM_NO_GUARD))
		return -EINVAL;
	if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
		return -EINVAL;
	if ((end - start) >> PAGE_SHIFT > totalram_pages())
		return -E2BIG;
	if (start < (unsigned long)area->addr ||
	    (void *)end > area->addr + get_vm_area_size(area))
		return -ERANGE;
	return 0;
}

/**
 * vm_area_map_pages - map pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 * @pages: pages to map (always PAGE_SIZE pages)
 */
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
		      unsigned long end, struct page **pages)
{
	int err;

	err = check_sparse_vm_area(area, start, end);
	if (err)
		return err;

	return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT);
}

/**
 * vm_area_unmap_pages - unmap pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 */
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
			 unsigned long end)
{
	if (check_sparse_vm_area(area, start, end))
		return;

	vunmap_range(start, end);
}
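
/*
 * Example (illustrative sketch): a VM_SPARSE user reserves a large region
 * of KVA once and then populates/depopulates pieces of it on demand; all
 * names below ('area', 'start', 'page') are placeholders:
 *
 *	struct vm_struct *area;
 *
 *	area = get_vm_area(SZ_4M, VM_SPARSE);	// reserves KVA only
 *	...
 *	// later: back one page's worth of the range with a real page
 *	err = vm_area_map_pages(area, start, start + PAGE_SIZE, &page);
 *	...
 *	vm_area_unmap_pages(area, start, start + PAGE_SIZE);
 *
 * 'start' must lie inside [area->addr, area->addr + get_vm_area_size(area)),
 * which is exactly what check_sparse_vm_area() enforces.
 */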

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)kasan_reset_tag(x);
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);

/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
		return NULL; /* XXX: no allowance for huge pgd */
	if (WARN_ON_ONCE(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pmd_bad(*pmd)))
		return NULL;

	ptep = pte_offset_kernel(pmd, addr);
	pte = ptep_get(ptep);
	if (pte_present(pte))
		page = pte_page(pte);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
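
/*
 * Example (illustrative sketch): because vmalloc memory is only virtually
 * contiguous, a caller that needs the backing pages (e.g. to build a
 * scatterlist) walks them one PAGE_SIZE step at a time:
 *
 *	void *buf = vmalloc(8 * PAGE_SIZE);
 *	struct page *pg;
 *	int i;
 *
 *	for (i = 0; buf && i < 8; i++) {
 *		pg = vmalloc_to_page(buf + i * PAGE_SIZE);
 *		// pg may belong to a different physical region each step
 *	}
 *	vfree(buf);
 */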

/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0


static DEFINE_SPINLOCK(free_vmap_area_lock);
static bool vmap_initialized __read_mostly;

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster, especially in the "no edge" splitting
 * of a free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used together with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node stores the maximum available free block
 * size of its sub-tree, right or left. This makes it possible to
 * find the lowest-address free area that satisfies a request.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for the "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

/*
 * This structure defines a single, solid model where a list and
 * rb-tree are part of one entity protected by the lock. Nodes are
 * sorted in ascending order, thus for O(1) access to left/right
 * neighbors a list is used as well as for sequential traversal.
 */
struct rb_list {
	struct rb_root root;
	struct list_head head;
	spinlock_t lock;
};

/*
 * A fast size-segregated storage for VAs up to 1M in size. A pool
 * consists of ready-to-go VAs of a certain size linked to each other.
 * pool[i] holds VAs of size (i + 1) pages.
 */
#define MAX_VA_SIZE_PAGES 256

struct vmap_pool {
	struct list_head head;
	unsigned long len;
};

/*
 * Per-node vmap logic. Users make use of nodes instead of a global
 * heap, which balances access across them and mitigates contention.
 */
static struct vmap_node {
	/* Simple size segregated storage. */
	struct vmap_pool pool[MAX_VA_SIZE_PAGES];
	spinlock_t pool_lock;
	bool skip_populate;

	/* Bookkeeping data of this node. */
	struct rb_list busy;
	struct rb_list lazy;

	/*
	 * Ready-to-free areas.
	 */
	struct list_head purge_list;
	struct work_struct purge_work;
	unsigned long nr_purged;
} single;

/*
 * Initial setup consists of one single node, i.e. balancing
 * is fully disabled. Later on, after vmap is initialized, these
 * parameters are updated based on the system capacity.
 */
static struct vmap_node *vmap_nodes = &single;
static __read_mostly unsigned int nr_vmap_nodes = 1;
static __read_mostly unsigned int vmap_zone_size = 1;

static inline unsigned int
addr_to_node_id(unsigned long addr)
{
	return (addr / vmap_zone_size) % nr_vmap_nodes;
}

static inline struct vmap_node *
addr_to_node(unsigned long addr)
{
	return &vmap_nodes[addr_to_node_id(addr)];
}

static inline struct vmap_node *
id_to_node(unsigned int id)
{
	return &vmap_nodes[id % nr_vmap_nodes];
}
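
/*
 * Worked example (numbers are illustrative only): with nr_vmap_nodes = 4
 * and vmap_zone_size = 4MB, addr_to_node_id() drops the low 22 bits and
 * takes the result modulo 4, so an address in the Nth 4MB zone of the
 * kernel address space lands on vmap_nodes[N % 4]. Consecutive 4MB slices
 * of KVA therefore round-robin across nodes, which is what spreads the
 * busy/lazy lock contention.
 */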

/*
 * We use the value 0 to represent "no node", that is why
 * an encoded value will be the node-id incremented by 1.
 * It is always greater than 0. A valid node_id which can
 * be encoded is [0:nr_vmap_nodes - 1]. If a passed node_id
 * is not valid, 0 is returned.
 */
static unsigned int
encode_vn_id(unsigned int node_id)
{
	/* Can store U8_MAX [0:254] nodes. */
	if (node_id < nr_vmap_nodes)
		return (node_id + 1) << BITS_PER_BYTE;

	/* Warn and no node encoded. */
	WARN_ONCE(1, "Encode wrong node id (%u)\n", node_id);
	return 0;
}

/*
 * Returns the decoded node-id; the valid range is within
 * [0:nr_vmap_nodes-1] values. Otherwise nr_vmap_nodes is
 * returned if the extracted data is wrong.
 */
static unsigned int
decode_vn_id(unsigned int val)
{
	unsigned int node_id = (val >> BITS_PER_BYTE) - 1;

	/* Can store U8_MAX [0:254] nodes. */
	if (node_id < nr_vmap_nodes)
		return node_id;

	/* If it was _not_ zero, warn. */
	WARN_ONCE(node_id != UINT_MAX,
		"Decode wrong node id (%d)\n", node_id);

	return nr_vmap_nodes;
}
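
/*
 * Worked example: for node_id = 2, encode_vn_id() returns
 * (2 + 1) << BITS_PER_BYTE = 0x300; decode_vn_id(0x300) recovers
 * (0x300 >> 8) - 1 = 2. The "no node" value 0 decodes to
 * (0 >> 8) - 1 = UINT_MAX, which fails the range check and yields
 * nr_vmap_nodes without triggering the warning.
 */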

static bool
is_vn_id_valid(unsigned int node_id)
{
	if (node_id < nr_vmap_nodes)
		return true;

	return false;
}

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void reclaim_and_purge_vmap_areas(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static void drain_vmap_area_work(struct work_struct *work);
static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

/* Look up the first VA which satisfies addr < va_end, NULL if none. */
static struct vmap_area *
__find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
{
	struct vmap_area *va = NULL;
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *tmp;

		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_end > addr) {
			va = tmp;
			if (tmp->va_start <= addr)
				break;

			n = n->rb_left;
		} else
			n = n->rb_right;
	}

	return va;
}

/*
 * Returns the node where the first VA that satisfies addr < va_end
 * resides. On success the node is locked; the caller is responsible
 * for unlocking it once the VA no longer needs to be accessed.
 *
 * Returns NULL if nothing found.
 */
static struct vmap_node *
find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
{
	struct vmap_node *vn, *va_node = NULL;
	struct vmap_area *va_lowest;
	int i;

	for (i = 0; i < nr_vmap_nodes; i++) {
		vn = &vmap_nodes[i];

		spin_lock(&vn->busy.lock);
		va_lowest = __find_vmap_area_exceed_addr(addr, &vn->busy.root);
		if (va_lowest) {
			if (!va_node || va_lowest->va_start < (*va)->va_start) {
				if (va_node)
					spin_unlock(&va_node->busy.lock);

				*va = va_lowest;
				va_node = vn;
				continue;
			}
		}
		spin_unlock(&vn->busy.lock);
	}

	return va_node;
}

static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
{
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}
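
/*
 * Worked example contrasting the two lookups above: with two busy areas
 * [0x1000, 0x3000) and [0x5000, 0x8000) in the tree,
 * __find_vmap_area(0x4000, root) returns NULL because 0x4000 lies in the
 * gap, while __find_vmap_area_exceed_addr(0x4000, root) returns the
 * [0x5000, 0x8000) area, the first one whose va_end exceeds the address.
 */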
111268ad4a33SUladzislau Rezki (Sony) */ 1113753df96bSBaoquan He if (va->va_end <= tmp_va->va_start) 111468ad4a33SUladzislau Rezki (Sony) link = &(*link)->rb_left; 1115753df96bSBaoquan He else if (va->va_start >= tmp_va->va_end) 111668ad4a33SUladzislau Rezki (Sony) link = &(*link)->rb_right; 11179c801f61SUladzislau Rezki (Sony) else { 11189c801f61SUladzislau Rezki (Sony) WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n", 11199c801f61SUladzislau Rezki (Sony) va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end); 11209c801f61SUladzislau Rezki (Sony) 11219c801f61SUladzislau Rezki (Sony) return NULL; 11229c801f61SUladzislau Rezki (Sony) } 112368ad4a33SUladzislau Rezki (Sony) } while (*link); 112468ad4a33SUladzislau Rezki (Sony) 112568ad4a33SUladzislau Rezki (Sony) *parent = &tmp_va->rb_node; 112668ad4a33SUladzislau Rezki (Sony) return link; 1127db64fe02SNick Piggin } 1128db64fe02SNick Piggin 112968ad4a33SUladzislau Rezki (Sony) static __always_inline struct list_head * 113068ad4a33SUladzislau Rezki (Sony) get_va_next_sibling(struct rb_node *parent, struct rb_node **link) 113168ad4a33SUladzislau Rezki (Sony) { 113268ad4a33SUladzislau Rezki (Sony) struct list_head *list; 1133db64fe02SNick Piggin 113468ad4a33SUladzislau Rezki (Sony) if (unlikely(!parent)) 113568ad4a33SUladzislau Rezki (Sony) /* 113668ad4a33SUladzislau Rezki (Sony) * The red-black tree where we try to find VA neighbors 113768ad4a33SUladzislau Rezki (Sony) * before merging or inserting is empty, i.e. there is 113868ad4a33SUladzislau Rezki (Sony) * no free vmap space. Normally this does not 113968ad4a33SUladzislau Rezki (Sony) * happen, but we handle the case anyway. 114068ad4a33SUladzislau Rezki (Sony) */ 114168ad4a33SUladzislau Rezki (Sony) return NULL; 114268ad4a33SUladzislau Rezki (Sony) 114368ad4a33SUladzislau Rezki (Sony) list = &rb_entry(parent, struct vmap_area, rb_node)->list; 114468ad4a33SUladzislau Rezki (Sony) return (&parent->rb_right == link ? list->next : list); 1145db64fe02SNick Piggin } 1146db64fe02SNick Piggin 114768ad4a33SUladzislau Rezki (Sony) static __always_inline void 11488eb510dbSUladzislau Rezki (Sony) __link_va(struct vmap_area *va, struct rb_root *root, 11498eb510dbSUladzislau Rezki (Sony) struct rb_node *parent, struct rb_node **link, 11508eb510dbSUladzislau Rezki (Sony) struct list_head *head, bool augment) 115168ad4a33SUladzislau Rezki (Sony) { 115268ad4a33SUladzislau Rezki (Sony) /* 115368ad4a33SUladzislau Rezki (Sony) * VA is still not in the list, but we can 115468ad4a33SUladzislau Rezki (Sony) * identify its future previous list_head node. 115568ad4a33SUladzislau Rezki (Sony) */ 115668ad4a33SUladzislau Rezki (Sony) if (likely(parent)) { 115768ad4a33SUladzislau Rezki (Sony) head = &rb_entry(parent, struct vmap_area, rb_node)->list; 115868ad4a33SUladzislau Rezki (Sony) if (&parent->rb_right != link) 115968ad4a33SUladzislau Rezki (Sony) head = head->prev; 116068ad4a33SUladzislau Rezki (Sony) } 1161db64fe02SNick Piggin 116268ad4a33SUladzislau Rezki (Sony) /* Insert into the rb-tree. */ 116368ad4a33SUladzislau Rezki (Sony) rb_link_node(&va->rb_node, parent, link); 11648eb510dbSUladzislau Rezki (Sony) if (augment) { 116568ad4a33SUladzislau Rezki (Sony) /* 116668ad4a33SUladzislau Rezki (Sony) * Just perform a simple insertion into the tree. We do not 116768ad4a33SUladzislau Rezki (Sony) * set va->subtree_max_size to its current size before 116868ad4a33SUladzislau Rezki (Sony) * calling rb_insert_augmented().
1169153090f2SBaoquan He * This is because we populate the tree from the bottom 117068ad4a33SUladzislau Rezki (Sony) * to parent levels when the node _is_ in the tree. 117168ad4a33SUladzislau Rezki (Sony) * 117268ad4a33SUladzislau Rezki (Sony) * Therefore we set subtree_max_size to zero after insertion, 117368ad4a33SUladzislau Rezki (Sony) * to let __augment_tree_propagate_from() put everything into 117468ad4a33SUladzislau Rezki (Sony) * the correct order later on. 117568ad4a33SUladzislau Rezki (Sony) */ 117668ad4a33SUladzislau Rezki (Sony) rb_insert_augmented(&va->rb_node, 117768ad4a33SUladzislau Rezki (Sony) root, &free_vmap_area_rb_augment_cb); 117868ad4a33SUladzislau Rezki (Sony) va->subtree_max_size = 0; 117968ad4a33SUladzislau Rezki (Sony) } else { 118068ad4a33SUladzislau Rezki (Sony) rb_insert_color(&va->rb_node, root); 118168ad4a33SUladzislau Rezki (Sony) } 118268ad4a33SUladzislau Rezki (Sony) 118368ad4a33SUladzislau Rezki (Sony) /* Address-sort this list */ 118468ad4a33SUladzislau Rezki (Sony) list_add(&va->list, head); 118568ad4a33SUladzislau Rezki (Sony) } 118668ad4a33SUladzislau Rezki (Sony) 118768ad4a33SUladzislau Rezki (Sony) static __always_inline void 11888eb510dbSUladzislau Rezki (Sony) link_va(struct vmap_area *va, struct rb_root *root, 11898eb510dbSUladzislau Rezki (Sony) struct rb_node *parent, struct rb_node **link, 11908eb510dbSUladzislau Rezki (Sony) struct list_head *head) 11918eb510dbSUladzislau Rezki (Sony) { 11928eb510dbSUladzislau Rezki (Sony) __link_va(va, root, parent, link, head, false); 11938eb510dbSUladzislau Rezki (Sony) } 11948eb510dbSUladzislau Rezki (Sony) 11958eb510dbSUladzislau Rezki (Sony) static __always_inline void 11968eb510dbSUladzislau Rezki (Sony) link_va_augment(struct vmap_area *va, struct rb_root *root, 11978eb510dbSUladzislau Rezki (Sony) struct rb_node *parent, struct rb_node **link, 11988eb510dbSUladzislau Rezki (Sony) struct list_head *head) 11998eb510dbSUladzislau Rezki (Sony) { 12008eb510dbSUladzislau Rezki (Sony) __link_va(va, root, parent, link, head, true); 12018eb510dbSUladzislau Rezki (Sony) } 12028eb510dbSUladzislau Rezki (Sony) 12038eb510dbSUladzislau Rezki (Sony) static __always_inline void 12048eb510dbSUladzislau Rezki (Sony) __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment) 120568ad4a33SUladzislau Rezki (Sony) { 1206460e42d1SUladzislau Rezki (Sony) if (WARN_ON(RB_EMPTY_NODE(&va->rb_node))) 1207460e42d1SUladzislau Rezki (Sony) return; 1208460e42d1SUladzislau Rezki (Sony) 12098eb510dbSUladzislau Rezki (Sony) if (augment) 121068ad4a33SUladzislau Rezki (Sony) rb_erase_augmented(&va->rb_node, 121168ad4a33SUladzislau Rezki (Sony) root, &free_vmap_area_rb_augment_cb); 121268ad4a33SUladzislau Rezki (Sony) else 121368ad4a33SUladzislau Rezki (Sony) rb_erase(&va->rb_node, root); 121468ad4a33SUladzislau Rezki (Sony) 12155d7a7c54SUladzislau Rezki (Sony) list_del_init(&va->list); 121668ad4a33SUladzislau Rezki (Sony) RB_CLEAR_NODE(&va->rb_node); 121768ad4a33SUladzislau Rezki (Sony) } 121868ad4a33SUladzislau Rezki (Sony) 12198eb510dbSUladzislau Rezki (Sony) static __always_inline void 12208eb510dbSUladzislau Rezki (Sony) unlink_va(struct vmap_area *va, struct rb_root *root) 12218eb510dbSUladzislau Rezki (Sony) { 12228eb510dbSUladzislau Rezki (Sony) __unlink_va(va, root, false); 12238eb510dbSUladzislau Rezki (Sony) } 12248eb510dbSUladzislau Rezki (Sony) 12258eb510dbSUladzislau Rezki (Sony) static __always_inline void 12268eb510dbSUladzislau Rezki (Sony) unlink_va_augment(struct vmap_area *va, struct rb_root *root)
12278eb510dbSUladzislau Rezki (Sony) { 12288eb510dbSUladzislau Rezki (Sony) __unlink_va(va, root, true); 12298eb510dbSUladzislau Rezki (Sony) } 12308eb510dbSUladzislau Rezki (Sony) 1231bb850f4dSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_PROPAGATE_CHECK 1232c3385e84SJiapeng Chong /* 1233c3385e84SJiapeng Chong * Gets called when a node is removed or rotated. 1234c3385e84SJiapeng Chong */ 1235c3385e84SJiapeng Chong static __always_inline unsigned long 1236c3385e84SJiapeng Chong compute_subtree_max_size(struct vmap_area *va) 1237c3385e84SJiapeng Chong { 1238c3385e84SJiapeng Chong return max3(va_size(va), 1239c3385e84SJiapeng Chong get_subtree_max_size(va->rb_node.rb_left), 1240c3385e84SJiapeng Chong get_subtree_max_size(va->rb_node.rb_right)); 1241c3385e84SJiapeng Chong } 1242c3385e84SJiapeng Chong 1243bb850f4dSUladzislau Rezki (Sony) static void 1244da27c9edSUladzislau Rezki (Sony) augment_tree_propagate_check(void) 1245bb850f4dSUladzislau Rezki (Sony) { 1246bb850f4dSUladzislau Rezki (Sony) struct vmap_area *va; 1247da27c9edSUladzislau Rezki (Sony) unsigned long computed_size; 1248bb850f4dSUladzislau Rezki (Sony) 1249da27c9edSUladzislau Rezki (Sony) list_for_each_entry(va, &free_vmap_area_list, list) { 1250da27c9edSUladzislau Rezki (Sony) computed_size = compute_subtree_max_size(va); 1251da27c9edSUladzislau Rezki (Sony) if (computed_size != va->subtree_max_size) 1252bb850f4dSUladzislau Rezki (Sony) pr_emerg("tree is corrupted: %lu, %lu\n", 1253bb850f4dSUladzislau Rezki (Sony) va_size(va), va->subtree_max_size); 1254bb850f4dSUladzislau Rezki (Sony) } 1255bb850f4dSUladzislau Rezki (Sony) } 1256bb850f4dSUladzislau Rezki (Sony) #endif 1257bb850f4dSUladzislau Rezki (Sony) 125868ad4a33SUladzislau Rezki (Sony) /* 125968ad4a33SUladzislau Rezki (Sony) * This function populates subtree_max_size from the bottom towards 126068ad4a33SUladzislau Rezki (Sony) * upper levels, starting from the VA point. The propagation must be 126168ad4a33SUladzislau Rezki (Sony) * done when a VA size is modified by changing its va_start/va_end, 126268ad4a33SUladzislau Rezki (Sony) * or when a VA is newly inserted into the tree. 126368ad4a33SUladzislau Rezki (Sony) * 126468ad4a33SUladzislau Rezki (Sony) * It means that __augment_tree_propagate_from() must be called: 126568ad4a33SUladzislau Rezki (Sony) * - After a VA has been inserted into the tree (free path); 126668ad4a33SUladzislau Rezki (Sony) * - After a VA has been shrunk (allocation path); 126768ad4a33SUladzislau Rezki (Sony) * - After a VA has been increased (merging path). 126868ad4a33SUladzislau Rezki (Sony) * 126968ad4a33SUladzislau Rezki (Sony) * Please note that this does not mean that upper parent nodes 127068ad4a33SUladzislau Rezki (Sony) * and their subtree_max_size are always recalculated all the way 127168ad4a33SUladzislau Rezki (Sony) * up to the root node. 127268ad4a33SUladzislau Rezki (Sony) * 127368ad4a33SUladzislau Rezki (Sony) * 4--8 127468ad4a33SUladzislau Rezki (Sony) * /\ 127568ad4a33SUladzislau Rezki (Sony) * / \ 127668ad4a33SUladzislau Rezki (Sony) * / \ 127768ad4a33SUladzislau Rezki (Sony) * 2--2 8--8 127868ad4a33SUladzislau Rezki (Sony) * 127968ad4a33SUladzislau Rezki (Sony) * For example, if we modify node 4 by shrinking it to 2, then 128068ad4a33SUladzislau Rezki (Sony) * no modification is required. If we shrink node 2 to 1, 128168ad4a33SUladzislau Rezki (Sony) * only its subtree_max_size is updated and set to 1.
If we shrink 128268ad4a33SUladzislau Rezki (Sony) * node 8 to 6, then its subtree_max_size is set to 6 and the parent 128368ad4a33SUladzislau Rezki (Sony) * node becomes 4--6. 128468ad4a33SUladzislau Rezki (Sony) */ 128568ad4a33SUladzislau Rezki (Sony) static __always_inline void 128668ad4a33SUladzislau Rezki (Sony) augment_tree_propagate_from(struct vmap_area *va) 128768ad4a33SUladzislau Rezki (Sony) { 128868ad4a33SUladzislau Rezki (Sony) /* 128915ae144fSUladzislau Rezki (Sony) * Populate the tree from the bottom towards the root until 129015ae144fSUladzislau Rezki (Sony) * the calculated maximum available size of the checked node 129115ae144fSUladzislau Rezki (Sony) * is equal to its current one. 129268ad4a33SUladzislau Rezki (Sony) */ 129315ae144fSUladzislau Rezki (Sony) free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL); 1294bb850f4dSUladzislau Rezki (Sony) 1295bb850f4dSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_PROPAGATE_CHECK 1296da27c9edSUladzislau Rezki (Sony) augment_tree_propagate_check(); 1297bb850f4dSUladzislau Rezki (Sony) #endif 129868ad4a33SUladzislau Rezki (Sony) } 129968ad4a33SUladzislau Rezki (Sony) 130068ad4a33SUladzislau Rezki (Sony) static void 130168ad4a33SUladzislau Rezki (Sony) insert_vmap_area(struct vmap_area *va, 130268ad4a33SUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head) 130368ad4a33SUladzislau Rezki (Sony) { 130468ad4a33SUladzislau Rezki (Sony) struct rb_node **link; 130568ad4a33SUladzislau Rezki (Sony) struct rb_node *parent; 130668ad4a33SUladzislau Rezki (Sony) 130768ad4a33SUladzislau Rezki (Sony) link = find_va_links(va, root, NULL, &parent); 13089c801f61SUladzislau Rezki (Sony) if (link) 130968ad4a33SUladzislau Rezki (Sony) link_va(va, root, parent, link, head); 131068ad4a33SUladzislau Rezki (Sony) } 131168ad4a33SUladzislau Rezki (Sony) 131268ad4a33SUladzislau Rezki (Sony) static void 131368ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(struct vmap_area *va, 131468ad4a33SUladzislau Rezki (Sony) struct rb_node *from, struct rb_root *root, 131568ad4a33SUladzislau Rezki (Sony) struct list_head *head) 131668ad4a33SUladzislau Rezki (Sony) { 131768ad4a33SUladzislau Rezki (Sony) struct rb_node **link; 131868ad4a33SUladzislau Rezki (Sony) struct rb_node *parent; 131968ad4a33SUladzislau Rezki (Sony) 132068ad4a33SUladzislau Rezki (Sony) if (from) 132168ad4a33SUladzislau Rezki (Sony) link = find_va_links(va, NULL, from, &parent); 132268ad4a33SUladzislau Rezki (Sony) else 132368ad4a33SUladzislau Rezki (Sony) link = find_va_links(va, root, NULL, &parent); 132468ad4a33SUladzislau Rezki (Sony) 13259c801f61SUladzislau Rezki (Sony) if (link) { 13268eb510dbSUladzislau Rezki (Sony) link_va_augment(va, root, parent, link, head); 132768ad4a33SUladzislau Rezki (Sony) augment_tree_propagate_from(va); 132868ad4a33SUladzislau Rezki (Sony) } 13299c801f61SUladzislau Rezki (Sony) } 133068ad4a33SUladzislau Rezki (Sony) 133168ad4a33SUladzislau Rezki (Sony) /* 133268ad4a33SUladzislau Rezki (Sony) * Merge a de-allocated chunk of VA memory with the previous 133368ad4a33SUladzislau Rezki (Sony) * and next free blocks. If no coalescing is done, a new 133468ad4a33SUladzislau Rezki (Sony) * free area is inserted. If the VA has been merged, it is 133568ad4a33SUladzislau Rezki (Sony) * freed. 13369c801f61SUladzislau Rezki (Sony) * 13379c801f61SUladzislau Rezki (Sony) * Please note, it can return NULL in case of overlapping 13389c801f61SUladzislau Rezki (Sony) * ranges, followed by a WARN() report.
Although this is 13399c801f61SUladzislau Rezki (Sony) * buggy behaviour, the system can stay alive and keep 13409c801f61SUladzislau Rezki (Sony) * going. 134168ad4a33SUladzislau Rezki (Sony) */ 13423c5c3cfbSDaniel Axtens static __always_inline struct vmap_area * 13438eb510dbSUladzislau Rezki (Sony) __merge_or_add_vmap_area(struct vmap_area *va, 13448eb510dbSUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head, bool augment) 134568ad4a33SUladzislau Rezki (Sony) { 134668ad4a33SUladzislau Rezki (Sony) struct vmap_area *sibling; 134768ad4a33SUladzislau Rezki (Sony) struct list_head *next; 134868ad4a33SUladzislau Rezki (Sony) struct rb_node **link; 134968ad4a33SUladzislau Rezki (Sony) struct rb_node *parent; 135068ad4a33SUladzislau Rezki (Sony) bool merged = false; 135168ad4a33SUladzislau Rezki (Sony) 135268ad4a33SUladzislau Rezki (Sony) /* 135368ad4a33SUladzislau Rezki (Sony) * Find a place in the tree where VA potentially will be 135468ad4a33SUladzislau Rezki (Sony) * inserted, unless it is merged with its sibling/siblings. 135568ad4a33SUladzislau Rezki (Sony) */ 135668ad4a33SUladzislau Rezki (Sony) link = find_va_links(va, root, NULL, &parent); 13579c801f61SUladzislau Rezki (Sony) if (!link) 13589c801f61SUladzislau Rezki (Sony) return NULL; 135968ad4a33SUladzislau Rezki (Sony) 136068ad4a33SUladzislau Rezki (Sony) /* 136168ad4a33SUladzislau Rezki (Sony) * Get next node of VA to check if merging can be done. 136268ad4a33SUladzislau Rezki (Sony) */ 136368ad4a33SUladzislau Rezki (Sony) next = get_va_next_sibling(parent, link); 136468ad4a33SUladzislau Rezki (Sony) if (unlikely(next == NULL)) 136568ad4a33SUladzislau Rezki (Sony) goto insert; 136668ad4a33SUladzislau Rezki (Sony) 136768ad4a33SUladzislau Rezki (Sony) /* 136868ad4a33SUladzislau Rezki (Sony) * start end 136968ad4a33SUladzislau Rezki (Sony) * | | 137068ad4a33SUladzislau Rezki (Sony) * |<------VA------>|<-----Next----->| 137168ad4a33SUladzislau Rezki (Sony) * | | 137268ad4a33SUladzislau Rezki (Sony) * start end 137368ad4a33SUladzislau Rezki (Sony) */ 137468ad4a33SUladzislau Rezki (Sony) if (next != head) { 137568ad4a33SUladzislau Rezki (Sony) sibling = list_entry(next, struct vmap_area, list); 137668ad4a33SUladzislau Rezki (Sony) if (sibling->va_start == va->va_end) { 137768ad4a33SUladzislau Rezki (Sony) sibling->va_start = va->va_start; 137868ad4a33SUladzislau Rezki (Sony) 137968ad4a33SUladzislau Rezki (Sony) /* Free vmap_area object. */ 138068ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va); 138168ad4a33SUladzislau Rezki (Sony) 138268ad4a33SUladzislau Rezki (Sony) /* Point to the new merged area.
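* The "next" sibling has absorbed this VA by extending to the left; it now starts at the freed VA's former va_start.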
*/ 138368ad4a33SUladzislau Rezki (Sony) va = sibling; 138468ad4a33SUladzislau Rezki (Sony) merged = true; 138568ad4a33SUladzislau Rezki (Sony) } 138668ad4a33SUladzislau Rezki (Sony) } 138768ad4a33SUladzislau Rezki (Sony) 138868ad4a33SUladzislau Rezki (Sony) /* 138968ad4a33SUladzislau Rezki (Sony) * start end 139068ad4a33SUladzislau Rezki (Sony) * | | 139168ad4a33SUladzislau Rezki (Sony) * |<-----Prev----->|<------VA------>| 139268ad4a33SUladzislau Rezki (Sony) * | | 139368ad4a33SUladzislau Rezki (Sony) * start end 139468ad4a33SUladzislau Rezki (Sony) */ 139568ad4a33SUladzislau Rezki (Sony) if (next->prev != head) { 139668ad4a33SUladzislau Rezki (Sony) sibling = list_entry(next->prev, struct vmap_area, list); 139768ad4a33SUladzislau Rezki (Sony) if (sibling->va_end == va->va_start) { 13985dd78640SUladzislau Rezki (Sony) /* 13995dd78640SUladzislau Rezki (Sony) * If both neighbors are coalesced, it is important 14005dd78640SUladzislau Rezki (Sony) * to unlink the "next" node first, followed by merging 14015dd78640SUladzislau Rezki (Sony) * with the "previous" one. Otherwise the tree might not be 14025dd78640SUladzislau Rezki (Sony) * fully populated if a sibling's augmented value is 14035dd78640SUladzislau Rezki (Sony) * "normalized" because of rotation operations. 14045dd78640SUladzislau Rezki (Sony) */ 140554f63d9dSUladzislau Rezki (Sony) if (merged) 14068eb510dbSUladzislau Rezki (Sony) __unlink_va(va, root, augment); 140768ad4a33SUladzislau Rezki (Sony) 14085dd78640SUladzislau Rezki (Sony) sibling->va_end = va->va_end; 14095dd78640SUladzislau Rezki (Sony) 141068ad4a33SUladzislau Rezki (Sony) /* Free vmap_area object. */ 141168ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va); 14123c5c3cfbSDaniel Axtens 14133c5c3cfbSDaniel Axtens /* Point to the new merged area.
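* The "previous" sibling has absorbed this VA by extending to the right; it now ends at the freed VA's former va_end.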
*/ 14143c5c3cfbSDaniel Axtens va = sibling; 14153c5c3cfbSDaniel Axtens merged = true; 141668ad4a33SUladzislau Rezki (Sony) } 141768ad4a33SUladzislau Rezki (Sony) } 141868ad4a33SUladzislau Rezki (Sony) 141968ad4a33SUladzislau Rezki (Sony) insert: 14205dd78640SUladzislau Rezki (Sony) if (!merged) 14218eb510dbSUladzislau Rezki (Sony) __link_va(va, root, parent, link, head, augment); 14223c5c3cfbSDaniel Axtens 142396e2db45SUladzislau Rezki (Sony) return va; 142496e2db45SUladzislau Rezki (Sony) } 142596e2db45SUladzislau Rezki (Sony) 142696e2db45SUladzislau Rezki (Sony) static __always_inline struct vmap_area * 14278eb510dbSUladzislau Rezki (Sony) merge_or_add_vmap_area(struct vmap_area *va, 14288eb510dbSUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head) 14298eb510dbSUladzislau Rezki (Sony) { 14308eb510dbSUladzislau Rezki (Sony) return __merge_or_add_vmap_area(va, root, head, false); 14318eb510dbSUladzislau Rezki (Sony) } 14328eb510dbSUladzislau Rezki (Sony) 14338eb510dbSUladzislau Rezki (Sony) static __always_inline struct vmap_area * 143496e2db45SUladzislau Rezki (Sony) merge_or_add_vmap_area_augment(struct vmap_area *va, 143596e2db45SUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head) 143696e2db45SUladzislau Rezki (Sony) { 14378eb510dbSUladzislau Rezki (Sony) va = __merge_or_add_vmap_area(va, root, head, true); 143896e2db45SUladzislau Rezki (Sony) if (va) 14395dd78640SUladzislau Rezki (Sony) augment_tree_propagate_from(va); 144096e2db45SUladzislau Rezki (Sony) 14413c5c3cfbSDaniel Axtens return va; 144268ad4a33SUladzislau Rezki (Sony) } 144368ad4a33SUladzislau Rezki (Sony) 144468ad4a33SUladzislau Rezki (Sony) static __always_inline bool 144568ad4a33SUladzislau Rezki (Sony) is_within_this_va(struct vmap_area *va, unsigned long size, 144668ad4a33SUladzislau Rezki (Sony) unsigned long align, unsigned long vstart) 144768ad4a33SUladzislau Rezki (Sony) { 144868ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr; 144968ad4a33SUladzislau Rezki (Sony) 145068ad4a33SUladzislau Rezki (Sony) if (va->va_start > vstart) 145168ad4a33SUladzislau Rezki (Sony) nva_start_addr = ALIGN(va->va_start, align); 145268ad4a33SUladzislau Rezki (Sony) else 145368ad4a33SUladzislau Rezki (Sony) nva_start_addr = ALIGN(vstart, align); 145468ad4a33SUladzislau Rezki (Sony) 145568ad4a33SUladzislau Rezki (Sony) /* Can overflow due to a big size or alignment. */ 145668ad4a33SUladzislau Rezki (Sony) if (nva_start_addr + size < nva_start_addr || 145768ad4a33SUladzislau Rezki (Sony) nva_start_addr < vstart) 145868ad4a33SUladzislau Rezki (Sony) return false; 145968ad4a33SUladzislau Rezki (Sony) 146068ad4a33SUladzislau Rezki (Sony) return (nva_start_addr + size <= va->va_end); 146168ad4a33SUladzislau Rezki (Sony) } 146268ad4a33SUladzislau Rezki (Sony) 146368ad4a33SUladzislau Rezki (Sony) /* 146468ad4a33SUladzislau Rezki (Sony) * Find the first free block (lowest start address) in the tree 146568ad4a33SUladzislau Rezki (Sony) * that can satisfy the request given by the passed 14669333fe98SUladzislau Rezki * parameters. Please note, with an alignment bigger than PAGE_SIZE, 14679333fe98SUladzislau Rezki * the search length is adjusted to account for the worst-case alignment 14689333fe98SUladzislau Rezki * overhead.
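* E.g. (hypothetical numbers) a one-page request with align == 4 * PAGE_SIZE is searched with length == PAGE_SIZE + 4 * PAGE_SIZE - 1, which guarantees that an aligned start address fits into any block that is found.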
146968ad4a33SUladzislau Rezki (Sony) */ 147068ad4a33SUladzislau Rezki (Sony) static __always_inline struct vmap_area * 1471f9863be4SUladzislau Rezki (Sony) find_vmap_lowest_match(struct rb_root *root, unsigned long size, 1472f9863be4SUladzislau Rezki (Sony) unsigned long align, unsigned long vstart, bool adjust_search_size) 147368ad4a33SUladzislau Rezki (Sony) { 147468ad4a33SUladzislau Rezki (Sony) struct vmap_area *va; 147568ad4a33SUladzislau Rezki (Sony) struct rb_node *node; 14769333fe98SUladzislau Rezki unsigned long length; 147768ad4a33SUladzislau Rezki (Sony) 147868ad4a33SUladzislau Rezki (Sony) /* Start from the root. */ 1479f9863be4SUladzislau Rezki (Sony) node = root->rb_node; 148068ad4a33SUladzislau Rezki (Sony) 14819333fe98SUladzislau Rezki /* Adjust the search size for alignment overhead. */ 14829333fe98SUladzislau Rezki length = adjust_search_size ? size + align - 1 : size; 14839333fe98SUladzislau Rezki 148468ad4a33SUladzislau Rezki (Sony) while (node) { 148568ad4a33SUladzislau Rezki (Sony) va = rb_entry(node, struct vmap_area, rb_node); 148668ad4a33SUladzislau Rezki (Sony) 14879333fe98SUladzislau Rezki if (get_subtree_max_size(node->rb_left) >= length && 148868ad4a33SUladzislau Rezki (Sony) vstart < va->va_start) { 148968ad4a33SUladzislau Rezki (Sony) node = node->rb_left; 149068ad4a33SUladzislau Rezki (Sony) } else { 149168ad4a33SUladzislau Rezki (Sony) if (is_within_this_va(va, size, align, vstart)) 149268ad4a33SUladzislau Rezki (Sony) return va; 149368ad4a33SUladzislau Rezki (Sony) 149468ad4a33SUladzislau Rezki (Sony) /* 149568ad4a33SUladzislau Rezki (Sony) * It does not make sense to go deeper into the right 149668ad4a33SUladzislau Rezki (Sony) * sub-tree if it does not have a free block that is 14979333fe98SUladzislau Rezki * equal to or bigger than the requested search length. 149868ad4a33SUladzislau Rezki (Sony) */ 14999333fe98SUladzislau Rezki if (get_subtree_max_size(node->rb_right) >= length) { 150068ad4a33SUladzislau Rezki (Sony) node = node->rb_right; 150168ad4a33SUladzislau Rezki (Sony) continue; 150268ad4a33SUladzislau Rezki (Sony) } 150368ad4a33SUladzislau Rezki (Sony) 150468ad4a33SUladzislau Rezki (Sony) /* 15053806b041SAndrew Morton * OK. We roll back and find the first right sub-tree 150668ad4a33SUladzislau Rezki (Sony) * that can satisfy the search criteria. This can happen 15079f531973SUladzislau Rezki (Sony) * due to the "vstart" restriction or an alignment overhead 15089f531973SUladzislau Rezki (Sony) * that is bigger than PAGE_SIZE. 150968ad4a33SUladzislau Rezki (Sony) */ 151068ad4a33SUladzislau Rezki (Sony) while ((node = rb_parent(node))) { 151168ad4a33SUladzislau Rezki (Sony) va = rb_entry(node, struct vmap_area, rb_node); 151268ad4a33SUladzislau Rezki (Sony) if (is_within_this_va(va, size, align, vstart)) 151368ad4a33SUladzislau Rezki (Sony) return va; 151468ad4a33SUladzislau Rezki (Sony) 15159333fe98SUladzislau Rezki if (get_subtree_max_size(node->rb_right) >= length && 151668ad4a33SUladzislau Rezki (Sony) vstart <= va->va_start) { 15179f531973SUladzislau Rezki (Sony) /* 15189f531973SUladzislau Rezki (Sony) * Shift the vstart forward. Please note, we update it with 15199f531973SUladzislau Rezki (Sony) * the parent's start address plus "1" because we do not want 15209f531973SUladzislau Rezki (Sony) * to enter the same sub-tree after it has already been checked 15219f531973SUladzislau Rezki (Sony) * and no suitable free block was found there.
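* E.g. (hypothetical address) if the parent VA starts at 0x2000, vstart becomes 0x2001 before the right sub-tree is entered.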
15229f531973SUladzislau Rezki (Sony) */ 15239f531973SUladzislau Rezki (Sony) vstart = va->va_start + 1; 152468ad4a33SUladzislau Rezki (Sony) node = node->rb_right; 152568ad4a33SUladzislau Rezki (Sony) break; 152668ad4a33SUladzislau Rezki (Sony) } 152768ad4a33SUladzislau Rezki (Sony) } 152868ad4a33SUladzislau Rezki (Sony) } 152968ad4a33SUladzislau Rezki (Sony) } 153068ad4a33SUladzislau Rezki (Sony) 153168ad4a33SUladzislau Rezki (Sony) return NULL; 153268ad4a33SUladzislau Rezki (Sony) } 153368ad4a33SUladzislau Rezki (Sony) 1534a6cf4e0fSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK 1535a6cf4e0fSUladzislau Rezki (Sony) #include <linux/random.h> 1536a6cf4e0fSUladzislau Rezki (Sony) 1537a6cf4e0fSUladzislau Rezki (Sony) static struct vmap_area * 1538bd1264c3SSong Liu find_vmap_lowest_linear_match(struct list_head *head, unsigned long size, 1539a6cf4e0fSUladzislau Rezki (Sony) unsigned long align, unsigned long vstart) 1540a6cf4e0fSUladzislau Rezki (Sony) { 1541a6cf4e0fSUladzislau Rezki (Sony) struct vmap_area *va; 1542a6cf4e0fSUladzislau Rezki (Sony) 1543bd1264c3SSong Liu list_for_each_entry(va, head, list) { 1544a6cf4e0fSUladzislau Rezki (Sony) if (!is_within_this_va(va, size, align, vstart)) 1545a6cf4e0fSUladzislau Rezki (Sony) continue; 1546a6cf4e0fSUladzislau Rezki (Sony) 1547a6cf4e0fSUladzislau Rezki (Sony) return va; 1548a6cf4e0fSUladzislau Rezki (Sony) } 1549a6cf4e0fSUladzislau Rezki (Sony) 1550a6cf4e0fSUladzislau Rezki (Sony) return NULL; 1551a6cf4e0fSUladzislau Rezki (Sony) } 1552a6cf4e0fSUladzislau Rezki (Sony) 1553a6cf4e0fSUladzislau Rezki (Sony) static void 1554bd1264c3SSong Liu find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head, 1555bd1264c3SSong Liu unsigned long size, unsigned long align) 1556a6cf4e0fSUladzislau Rezki (Sony) { 1557a6cf4e0fSUladzislau Rezki (Sony) struct vmap_area *va_1, *va_2; 1558a6cf4e0fSUladzislau Rezki (Sony) unsigned long vstart; 1559a6cf4e0fSUladzislau Rezki (Sony) unsigned int rnd; 1560a6cf4e0fSUladzislau Rezki (Sony) 1561a6cf4e0fSUladzislau Rezki (Sony) get_random_bytes(&rnd, sizeof(rnd)); 1562a6cf4e0fSUladzislau Rezki (Sony) vstart = VMALLOC_START + rnd; 1563a6cf4e0fSUladzislau Rezki (Sony) 1564bd1264c3SSong Liu va_1 = find_vmap_lowest_match(root, size, align, vstart, false); 1565bd1264c3SSong Liu va_2 = find_vmap_lowest_linear_match(head, size, align, vstart); 1566a6cf4e0fSUladzislau Rezki (Sony) 1567a6cf4e0fSUladzislau Rezki (Sony) if (va_1 != va_2) 1568a6cf4e0fSUladzislau Rezki (Sony) pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n", 1569a6cf4e0fSUladzislau Rezki (Sony) va_1, va_2, vstart); 1570a6cf4e0fSUladzislau Rezki (Sony) } 1571a6cf4e0fSUladzislau Rezki (Sony) #endif 1572a6cf4e0fSUladzislau Rezki (Sony) 157368ad4a33SUladzislau Rezki (Sony) enum fit_type { 157468ad4a33SUladzislau Rezki (Sony) NOTHING_FIT = 0, 157568ad4a33SUladzislau Rezki (Sony) FL_FIT_TYPE = 1, /* full fit */ 157668ad4a33SUladzislau Rezki (Sony) LE_FIT_TYPE = 2, /* left edge fit */ 157768ad4a33SUladzislau Rezki (Sony) RE_FIT_TYPE = 3, /* right edge fit */ 157868ad4a33SUladzislau Rezki (Sony) NE_FIT_TYPE = 4 /* no edge fit */ 157968ad4a33SUladzislau Rezki (Sony) }; 158068ad4a33SUladzislau Rezki (Sony) 158168ad4a33SUladzislau Rezki (Sony) static __always_inline enum fit_type 158268ad4a33SUladzislau Rezki (Sony) classify_va_fit_type(struct vmap_area *va, 158368ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr, unsigned long size) 158468ad4a33SUladzislau Rezki (Sony) { 158568ad4a33SUladzislau Rezki (Sony) enum fit_type type; 
158668ad4a33SUladzislau Rezki (Sony) 158768ad4a33SUladzislau Rezki (Sony) /* Check if it is within VA. */ 158868ad4a33SUladzislau Rezki (Sony) if (nva_start_addr < va->va_start || 158968ad4a33SUladzislau Rezki (Sony) nva_start_addr + size > va->va_end) 159068ad4a33SUladzislau Rezki (Sony) return NOTHING_FIT; 159168ad4a33SUladzislau Rezki (Sony) 159268ad4a33SUladzislau Rezki (Sony) /* Now classify. */ 159368ad4a33SUladzislau Rezki (Sony) if (va->va_start == nva_start_addr) { 159468ad4a33SUladzislau Rezki (Sony) if (va->va_end == nva_start_addr + size) 159568ad4a33SUladzislau Rezki (Sony) type = FL_FIT_TYPE; 159668ad4a33SUladzislau Rezki (Sony) else 159768ad4a33SUladzislau Rezki (Sony) type = LE_FIT_TYPE; 159868ad4a33SUladzislau Rezki (Sony) } else if (va->va_end == nva_start_addr + size) { 159968ad4a33SUladzislau Rezki (Sony) type = RE_FIT_TYPE; 160068ad4a33SUladzislau Rezki (Sony) } else { 160168ad4a33SUladzislau Rezki (Sony) type = NE_FIT_TYPE; 160268ad4a33SUladzislau Rezki (Sony) } 160368ad4a33SUladzislau Rezki (Sony) 160468ad4a33SUladzislau Rezki (Sony) return type; 160568ad4a33SUladzislau Rezki (Sony) } 160668ad4a33SUladzislau Rezki (Sony) 160768ad4a33SUladzislau Rezki (Sony) static __always_inline int 16085b75b8e1SUladzislau Rezki (Sony) va_clip(struct rb_root *root, struct list_head *head, 1609f9863be4SUladzislau Rezki (Sony) struct vmap_area *va, unsigned long nva_start_addr, 1610f9863be4SUladzislau Rezki (Sony) unsigned long size) 161168ad4a33SUladzislau Rezki (Sony) { 16122c929233SArnd Bergmann struct vmap_area *lva = NULL; 16131b23ff80SBaoquan He enum fit_type type = classify_va_fit_type(va, nva_start_addr, size); 161468ad4a33SUladzislau Rezki (Sony) 161568ad4a33SUladzislau Rezki (Sony) if (type == FL_FIT_TYPE) { 161668ad4a33SUladzislau Rezki (Sony) /* 161768ad4a33SUladzislau Rezki (Sony) * No need to split VA, it fully fits. 161868ad4a33SUladzislau Rezki (Sony) * 161968ad4a33SUladzislau Rezki (Sony) * | | 162068ad4a33SUladzislau Rezki (Sony) * V NVA V 162168ad4a33SUladzislau Rezki (Sony) * |---------------| 162268ad4a33SUladzislau Rezki (Sony) */ 1623f9863be4SUladzislau Rezki (Sony) unlink_va_augment(va, root); 162468ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va); 162568ad4a33SUladzislau Rezki (Sony) } else if (type == LE_FIT_TYPE) { 162668ad4a33SUladzislau Rezki (Sony) /* 162768ad4a33SUladzislau Rezki (Sony) * Split left edge of fit VA. 162868ad4a33SUladzislau Rezki (Sony) * 162968ad4a33SUladzislau Rezki (Sony) * | | 163068ad4a33SUladzislau Rezki (Sony) * V NVA V R 163168ad4a33SUladzislau Rezki (Sony) * |-------|-------| 163268ad4a33SUladzislau Rezki (Sony) */ 163368ad4a33SUladzislau Rezki (Sony) va->va_start += size; 163468ad4a33SUladzislau Rezki (Sony) } else if (type == RE_FIT_TYPE) { 163568ad4a33SUladzislau Rezki (Sony) /* 163668ad4a33SUladzislau Rezki (Sony) * Split right edge of fit VA. 163768ad4a33SUladzislau Rezki (Sony) * 163868ad4a33SUladzislau Rezki (Sony) * | | 163968ad4a33SUladzislau Rezki (Sony) * L V NVA V 164068ad4a33SUladzislau Rezki (Sony) * |-------|-------| 164168ad4a33SUladzislau Rezki (Sony) */ 164268ad4a33SUladzislau Rezki (Sony) va->va_end = nva_start_addr; 164368ad4a33SUladzislau Rezki (Sony) } else if (type == NE_FIT_TYPE) { 164468ad4a33SUladzislau Rezki (Sony) /* 164568ad4a33SUladzislau Rezki (Sony) * Split no edge of fit VA. 
164668ad4a33SUladzislau Rezki (Sony) * 164768ad4a33SUladzislau Rezki (Sony) * | | 164868ad4a33SUladzislau Rezki (Sony) * L V NVA V R 164968ad4a33SUladzislau Rezki (Sony) * |---|-------|---| 165068ad4a33SUladzislau Rezki (Sony) */ 165182dd23e8SUladzislau Rezki (Sony) lva = __this_cpu_xchg(ne_fit_preload_node, NULL); 165282dd23e8SUladzislau Rezki (Sony) if (unlikely(!lva)) { 165382dd23e8SUladzislau Rezki (Sony) /* 165482dd23e8SUladzislau Rezki (Sony) * For the percpu allocator we do not do any pre-allocation 165582dd23e8SUladzislau Rezki (Sony) * and leave it as it is. The reason is that it most likely 165682dd23e8SUladzislau Rezki (Sony) * never ends up with NE_FIT_TYPE splitting. In the case of 165782dd23e8SUladzislau Rezki (Sony) * percpu allocations, offsets and sizes are aligned to a 165882dd23e8SUladzislau Rezki (Sony) * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE 165982dd23e8SUladzislau Rezki (Sony) * are its main fitting cases. 166082dd23e8SUladzislau Rezki (Sony) * 166182dd23e8SUladzislau Rezki (Sony) * There are a few exceptions though; one example is 166282dd23e8SUladzislau Rezki (Sony) * the first allocation (early boot-up) when we have "one" 166382dd23e8SUladzislau Rezki (Sony) * big free space that has to be split. 1664060650a2SUladzislau Rezki (Sony) * 1665060650a2SUladzislau Rezki (Sony) * Also we can hit this path in case of regular "vmap" 1666060650a2SUladzislau Rezki (Sony) * allocations, if "this" current CPU was not preloaded. 1667060650a2SUladzislau Rezki (Sony) * See the comment in alloc_vmap_area() for why. If so, then 1668060650a2SUladzislau Rezki (Sony) * GFP_NOWAIT is used instead to get an extra object for 1669060650a2SUladzislau Rezki (Sony) * splitting purposes. That is rare and most of the time does not 1670060650a2SUladzislau Rezki (Sony) * occur. 1671060650a2SUladzislau Rezki (Sony) * 1672060650a2SUladzislau Rezki (Sony) * What happens if the allocation fails? Basically, 1673060650a2SUladzislau Rezki (Sony) * the "overflow" path is triggered to purge lazily-freed 1674060650a2SUladzislau Rezki (Sony) * areas and free some memory; then the "retry" path is 1675060650a2SUladzislau Rezki (Sony) * taken to repeat the attempt one more time. See more details 1676060650a2SUladzislau Rezki (Sony) * in the alloc_vmap_area() function. 167782dd23e8SUladzislau Rezki (Sony) */ 167868ad4a33SUladzislau Rezki (Sony) lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT); 167982dd23e8SUladzislau Rezki (Sony) if (!lva) 168068ad4a33SUladzislau Rezki (Sony) return -1; 168182dd23e8SUladzislau Rezki (Sony) } 168268ad4a33SUladzislau Rezki (Sony) 168368ad4a33SUladzislau Rezki (Sony) /* 168468ad4a33SUladzislau Rezki (Sony) * Build the remainder. 168568ad4a33SUladzislau Rezki (Sony) */ 168668ad4a33SUladzislau Rezki (Sony) lva->va_start = va->va_start; 168768ad4a33SUladzislau Rezki (Sony) lva->va_end = nva_start_addr; 168868ad4a33SUladzislau Rezki (Sony) 168968ad4a33SUladzislau Rezki (Sony) /* 169068ad4a33SUladzislau Rezki (Sony) * Shrink this VA to the remaining size.
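* E.g. (hypothetical addresses) clipping NVA [0x3000:0x5000) out of a free VA [0x1000:0x9000) leaves lva == [0x1000:0x3000) and shrinks this VA to [0x5000:0x9000).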
169168ad4a33SUladzislau Rezki (Sony) */ 169268ad4a33SUladzislau Rezki (Sony) va->va_start = nva_start_addr + size; 169368ad4a33SUladzislau Rezki (Sony) } else { 169468ad4a33SUladzislau Rezki (Sony) return -1; 169568ad4a33SUladzislau Rezki (Sony) } 169668ad4a33SUladzislau Rezki (Sony) 169768ad4a33SUladzislau Rezki (Sony) if (type != FL_FIT_TYPE) { 169868ad4a33SUladzislau Rezki (Sony) augment_tree_propagate_from(va); 169968ad4a33SUladzislau Rezki (Sony) 17002c929233SArnd Bergmann if (lva) /* type == NE_FIT_TYPE */ 1701f9863be4SUladzislau Rezki (Sony) insert_vmap_area_augment(lva, &va->rb_node, root, head); 170268ad4a33SUladzislau Rezki (Sony) } 170368ad4a33SUladzislau Rezki (Sony) 170468ad4a33SUladzislau Rezki (Sony) return 0; 170568ad4a33SUladzislau Rezki (Sony) } 170668ad4a33SUladzislau Rezki (Sony) 170738f6b9afSUladzislau Rezki (Sony) static unsigned long 170838f6b9afSUladzislau Rezki (Sony) va_alloc(struct vmap_area *va, 170938f6b9afSUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head, 171038f6b9afSUladzislau Rezki (Sony) unsigned long size, unsigned long align, 171138f6b9afSUladzislau Rezki (Sony) unsigned long vstart, unsigned long vend) 171238f6b9afSUladzislau Rezki (Sony) { 171338f6b9afSUladzislau Rezki (Sony) unsigned long nva_start_addr; 171438f6b9afSUladzislau Rezki (Sony) int ret; 171538f6b9afSUladzislau Rezki (Sony) 171638f6b9afSUladzislau Rezki (Sony) if (va->va_start > vstart) 171738f6b9afSUladzislau Rezki (Sony) nva_start_addr = ALIGN(va->va_start, align); 171838f6b9afSUladzislau Rezki (Sony) else 171938f6b9afSUladzislau Rezki (Sony) nva_start_addr = ALIGN(vstart, align); 172038f6b9afSUladzislau Rezki (Sony) 172138f6b9afSUladzislau Rezki (Sony) /* Check the "vend" restriction. */ 172238f6b9afSUladzislau Rezki (Sony) if (nva_start_addr + size > vend) 172338f6b9afSUladzislau Rezki (Sony) return vend; 172438f6b9afSUladzislau Rezki (Sony) 172538f6b9afSUladzislau Rezki (Sony) /* Update the free vmap_area. */ 17265b75b8e1SUladzislau Rezki (Sony) ret = va_clip(root, head, va, nva_start_addr, size); 172738f6b9afSUladzislau Rezki (Sony) if (WARN_ON_ONCE(ret)) 172838f6b9afSUladzislau Rezki (Sony) return vend; 172938f6b9afSUladzislau Rezki (Sony) 173038f6b9afSUladzislau Rezki (Sony) return nva_start_addr; 173138f6b9afSUladzislau Rezki (Sony) } 173238f6b9afSUladzislau Rezki (Sony) 173368ad4a33SUladzislau Rezki (Sony) /* 173468ad4a33SUladzislau Rezki (Sony) * Returns the start address of the newly allocated area on success. 173568ad4a33SUladzislau Rezki (Sony) * Otherwise "vend" is returned to indicate failure. 173668ad4a33SUladzislau Rezki (Sony) */ 173768ad4a33SUladzislau Rezki (Sony) static __always_inline unsigned long 1738f9863be4SUladzislau Rezki (Sony) __alloc_vmap_area(struct rb_root *root, struct list_head *head, 1739f9863be4SUladzislau Rezki (Sony) unsigned long size, unsigned long align, 1740cacca6baSUladzislau Rezki (Sony) unsigned long vstart, unsigned long vend) 174168ad4a33SUladzislau Rezki (Sony) { 17429333fe98SUladzislau Rezki bool adjust_search_size = true; 174368ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr; 174468ad4a33SUladzislau Rezki (Sony) struct vmap_area *va; 174568ad4a33SUladzislau Rezki (Sony) 17469333fe98SUladzislau Rezki /* 17479333fe98SUladzislau Rezki * Do not adjust when: 17489333fe98SUladzislau Rezki * a) align <= PAGE_SIZE, because it does not make any sense.
17499333fe98SUladzislau Rezki * All blocks (their start addresses) are at least PAGE_SIZE 17509333fe98SUladzislau Rezki * aligned anyway; 17519333fe98SUladzislau Rezki * b) a short range where the requested size corresponds exactly to the 17529333fe98SUladzislau Rezki * specified [vstart:vend] interval and the alignment > PAGE_SIZE. 17539333fe98SUladzislau Rezki * With an adjusted search length the allocation would not succeed. 17549333fe98SUladzislau Rezki */ 17559333fe98SUladzislau Rezki if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size)) 17569333fe98SUladzislau Rezki adjust_search_size = false; 17579333fe98SUladzislau Rezki 1758f9863be4SUladzislau Rezki (Sony) va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size); 175968ad4a33SUladzislau Rezki (Sony) if (unlikely(!va)) 176068ad4a33SUladzislau Rezki (Sony) return vend; 176168ad4a33SUladzislau Rezki (Sony) 176238f6b9afSUladzislau Rezki (Sony) nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend); 176338f6b9afSUladzislau Rezki (Sony) if (nva_start_addr == vend) 176468ad4a33SUladzislau Rezki (Sony) return vend; 176568ad4a33SUladzislau Rezki (Sony) 1766a6cf4e0fSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK 1767bd1264c3SSong Liu find_vmap_lowest_match_check(root, head, size, align); 1768a6cf4e0fSUladzislau Rezki (Sony) #endif 1769a6cf4e0fSUladzislau Rezki (Sony) 177068ad4a33SUladzislau Rezki (Sony) return nva_start_addr; 177168ad4a33SUladzislau Rezki (Sony) } 17724da56b99SChris Wilson 1773db64fe02SNick Piggin /* 1774d98c9e83SAndrey Ryabinin * Free a region of KVA allocated by alloc_vmap_area 1775d98c9e83SAndrey Ryabinin */ 1776d98c9e83SAndrey Ryabinin static void free_vmap_area(struct vmap_area *va) 1777d98c9e83SAndrey Ryabinin { 1778d0936029SUladzislau Rezki (Sony) struct vmap_node *vn = addr_to_node(va->va_start); 1779d0936029SUladzislau Rezki (Sony) 1780d98c9e83SAndrey Ryabinin /* 1781d98c9e83SAndrey Ryabinin * Remove from the busy tree/list. 1782d98c9e83SAndrey Ryabinin */ 1783d0936029SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock); 1784d0936029SUladzislau Rezki (Sony) unlink_va(va, &vn->busy.root); 1785d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 1786d98c9e83SAndrey Ryabinin 1787d98c9e83SAndrey Ryabinin /* 1788d98c9e83SAndrey Ryabinin * Insert/Merge it back to the free tree/list. 1789d98c9e83SAndrey Ryabinin */ 1790d98c9e83SAndrey Ryabinin spin_lock(&free_vmap_area_lock); 179196e2db45SUladzislau Rezki (Sony) merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list); 1792d98c9e83SAndrey Ryabinin spin_unlock(&free_vmap_area_lock); 1793d98c9e83SAndrey Ryabinin } 1794d98c9e83SAndrey Ryabinin 1795187f8cc4SUladzislau Rezki (Sony) static inline void 1796187f8cc4SUladzislau Rezki (Sony) preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node) 1797187f8cc4SUladzislau Rezki (Sony) { 1798187f8cc4SUladzislau Rezki (Sony) struct vmap_area *va = NULL; 1799187f8cc4SUladzislau Rezki (Sony) 1800187f8cc4SUladzislau Rezki (Sony) /* 1801187f8cc4SUladzislau Rezki (Sony) * Preload this CPU with one extra vmap_area object. It is used 1802187f8cc4SUladzislau Rezki (Sony) * when the fit type of the free area is NE_FIT_TYPE. It guarantees 1803187f8cc4SUladzislau Rezki (Sony) * that a CPU that does an allocation is preloaded.
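* The object is stashed in the per-cpu "ne_fit_preload_node" slot and is consumed later by va_clip() when an NE_FIT_TYPE split requires an extra vmap_area.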
1804187f8cc4SUladzislau Rezki (Sony) * 1805187f8cc4SUladzislau Rezki (Sony) * We do it in a non-atomic context, which allows us to use more 1806187f8cc4SUladzislau Rezki (Sony) * permissive allocation masks and thus be more stable under low-memory 1807187f8cc4SUladzislau Rezki (Sony) * conditions and high memory pressure. 1808187f8cc4SUladzislau Rezki (Sony) */ 1809187f8cc4SUladzislau Rezki (Sony) if (!this_cpu_read(ne_fit_preload_node)) 1810187f8cc4SUladzislau Rezki (Sony) va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); 1811187f8cc4SUladzislau Rezki (Sony) 1812187f8cc4SUladzislau Rezki (Sony) spin_lock(lock); 1813187f8cc4SUladzislau Rezki (Sony) 1814187f8cc4SUladzislau Rezki (Sony) if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va)) 1815187f8cc4SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va); 1816187f8cc4SUladzislau Rezki (Sony) } 1817187f8cc4SUladzislau Rezki (Sony) 181872210662SUladzislau Rezki (Sony) static struct vmap_pool * 181972210662SUladzislau Rezki (Sony) size_to_va_pool(struct vmap_node *vn, unsigned long size) 182072210662SUladzislau Rezki (Sony) { 182172210662SUladzislau Rezki (Sony) unsigned int idx = (size - 1) / PAGE_SIZE; 182272210662SUladzislau Rezki (Sony) 182372210662SUladzislau Rezki (Sony) if (idx < MAX_VA_SIZE_PAGES) 182472210662SUladzislau Rezki (Sony) return &vn->pool[idx]; 182572210662SUladzislau Rezki (Sony) 182672210662SUladzislau Rezki (Sony) return NULL; 182772210662SUladzislau Rezki (Sony) } 182872210662SUladzislau Rezki (Sony) 182972210662SUladzislau Rezki (Sony) static bool 183072210662SUladzislau Rezki (Sony) node_pool_add_va(struct vmap_node *n, struct vmap_area *va) 183172210662SUladzislau Rezki (Sony) { 183272210662SUladzislau Rezki (Sony) struct vmap_pool *vp; 183372210662SUladzislau Rezki (Sony) 183472210662SUladzislau Rezki (Sony) vp = size_to_va_pool(n, va_size(va)); 183572210662SUladzislau Rezki (Sony) if (!vp) 183672210662SUladzislau Rezki (Sony) return false; 183772210662SUladzislau Rezki (Sony) 183872210662SUladzislau Rezki (Sony) spin_lock(&n->pool_lock); 183972210662SUladzislau Rezki (Sony) list_add(&va->list, &vp->head); 184072210662SUladzislau Rezki (Sony) WRITE_ONCE(vp->len, vp->len + 1); 184172210662SUladzislau Rezki (Sony) spin_unlock(&n->pool_lock); 184272210662SUladzislau Rezki (Sony) 184372210662SUladzislau Rezki (Sony) return true; 184472210662SUladzislau Rezki (Sony) } 184572210662SUladzislau Rezki (Sony) 184672210662SUladzislau Rezki (Sony) static struct vmap_area * 184772210662SUladzislau Rezki (Sony) node_pool_del_va(struct vmap_node *vn, unsigned long size, 184872210662SUladzislau Rezki (Sony) unsigned long align, unsigned long vstart, 184972210662SUladzislau Rezki (Sony) unsigned long vend) 185072210662SUladzislau Rezki (Sony) { 185172210662SUladzislau Rezki (Sony) struct vmap_area *va = NULL; 185272210662SUladzislau Rezki (Sony) struct vmap_pool *vp; 185372210662SUladzislau Rezki (Sony) int err = 0; 185472210662SUladzislau Rezki (Sony) 185572210662SUladzislau Rezki (Sony) vp = size_to_va_pool(vn, size); 185672210662SUladzislau Rezki (Sony) if (!vp || list_empty(&vp->head)) 185772210662SUladzislau Rezki (Sony) return NULL; 185872210662SUladzislau Rezki (Sony) 185972210662SUladzislau Rezki (Sony) spin_lock(&vn->pool_lock); 186072210662SUladzislau Rezki (Sony) if (!list_empty(&vp->head)) { 186172210662SUladzislau Rezki (Sony) va = list_first_entry(&vp->head, struct vmap_area, list); 186272210662SUladzislau Rezki (Sony) 186372210662SUladzislau Rezki (Sony) if (IS_ALIGNED(va->va_start, align)) {
186472210662SUladzislau Rezki (Sony) /* 186572210662SUladzislau Rezki (Sony) * Do some sanity checks and emit a warning 186672210662SUladzislau Rezki (Sony) * if any of the checks below detects an error. 186772210662SUladzislau Rezki (Sony) */ 186872210662SUladzislau Rezki (Sony) err |= (va_size(va) != size); 186972210662SUladzislau Rezki (Sony) err |= (va->va_start < vstart); 187072210662SUladzislau Rezki (Sony) err |= (va->va_end > vend); 187172210662SUladzislau Rezki (Sony) 187272210662SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(err)) { 187372210662SUladzislau Rezki (Sony) list_del_init(&va->list); 187472210662SUladzislau Rezki (Sony) WRITE_ONCE(vp->len, vp->len - 1); 187572210662SUladzislau Rezki (Sony) } else { 187672210662SUladzislau Rezki (Sony) va = NULL; 187772210662SUladzislau Rezki (Sony) } 187872210662SUladzislau Rezki (Sony) } else { 187972210662SUladzislau Rezki (Sony) list_move_tail(&va->list, &vp->head); 188072210662SUladzislau Rezki (Sony) va = NULL; 188172210662SUladzislau Rezki (Sony) } 188272210662SUladzislau Rezki (Sony) } 188372210662SUladzislau Rezki (Sony) spin_unlock(&vn->pool_lock); 188472210662SUladzislau Rezki (Sony) 188572210662SUladzislau Rezki (Sony) return va; 188672210662SUladzislau Rezki (Sony) } 188772210662SUladzislau Rezki (Sony) 188872210662SUladzislau Rezki (Sony) static struct vmap_area * 188972210662SUladzislau Rezki (Sony) node_alloc(unsigned long size, unsigned long align, 189072210662SUladzislau Rezki (Sony) unsigned long vstart, unsigned long vend, 189172210662SUladzislau Rezki (Sony) unsigned long *addr, unsigned int *vn_id) 189272210662SUladzislau Rezki (Sony) { 189372210662SUladzislau Rezki (Sony) struct vmap_area *va; 189472210662SUladzislau Rezki (Sony) 189572210662SUladzislau Rezki (Sony) *vn_id = 0; 189672210662SUladzislau Rezki (Sony) *addr = vend; 189772210662SUladzislau Rezki (Sony) 189872210662SUladzislau Rezki (Sony) /* 189972210662SUladzislau Rezki (Sony) * Fall back to the global heap if this is not a vmalloc 190072210662SUladzislau Rezki (Sony) * range or there is only one node. 190172210662SUladzislau Rezki (Sony) */ 190272210662SUladzislau Rezki (Sony) if (vstart != VMALLOC_START || vend != VMALLOC_END || 190372210662SUladzislau Rezki (Sony) nr_vmap_nodes == 1) 190472210662SUladzislau Rezki (Sony) return NULL; 190572210662SUladzislau Rezki (Sony) 190672210662SUladzislau Rezki (Sony) *vn_id = raw_smp_processor_id() % nr_vmap_nodes; 190772210662SUladzislau Rezki (Sony) va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend); 190872210662SUladzislau Rezki (Sony) *vn_id = encode_vn_id(*vn_id); 190972210662SUladzislau Rezki (Sony) 191072210662SUladzislau Rezki (Sony) if (va) 191172210662SUladzislau Rezki (Sony) *addr = va->va_start; 191272210662SUladzislau Rezki (Sony) 191372210662SUladzislau Rezki (Sony) return va; 191472210662SUladzislau Rezki (Sony) } 191572210662SUladzislau Rezki (Sony) 1916d98c9e83SAndrey Ryabinin /* 1917db64fe02SNick Piggin * Allocate a region of KVA of the specified size and alignment, within 1918db64fe02SNick Piggin * the [vstart:vend] range.
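* * A minimal usage sketch (hypothetical values): * * va = alloc_vmap_area(4 * PAGE_SIZE, PAGE_SIZE, * VMALLOC_START, VMALLOC_END, * NUMA_NO_NODE, GFP_KERNEL, 0); * if (IS_ERR(va)) * return PTR_ERR(va); * * On success, [va->va_start:va->va_end) is reserved and linked into the busy tree of its vmap node.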
1919db64fe02SNick Piggin */ 1920db64fe02SNick Piggin static struct vmap_area *alloc_vmap_area(unsigned long size, 1921db64fe02SNick Piggin unsigned long align, 1922db64fe02SNick Piggin unsigned long vstart, unsigned long vend, 1923869176a0SBaoquan He int node, gfp_t gfp_mask, 1924869176a0SBaoquan He unsigned long va_flags) 1925db64fe02SNick Piggin { 1926d0936029SUladzislau Rezki (Sony) struct vmap_node *vn; 1927187f8cc4SUladzislau Rezki (Sony) struct vmap_area *va; 192812e376a6SUladzislau Rezki (Sony) unsigned long freed; 19291da177e4SLinus Torvalds unsigned long addr; 193072210662SUladzislau Rezki (Sony) unsigned int vn_id; 1931db64fe02SNick Piggin int purged = 0; 1932d98c9e83SAndrey Ryabinin int ret; 1933db64fe02SNick Piggin 19347e4a32c0SHyunmin Lee if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align))) 19357e4a32c0SHyunmin Lee return ERR_PTR(-EINVAL); 1936db64fe02SNick Piggin 193768ad4a33SUladzislau Rezki (Sony) if (unlikely(!vmap_initialized)) 193868ad4a33SUladzislau Rezki (Sony) return ERR_PTR(-EBUSY); 193968ad4a33SUladzislau Rezki (Sony) 19405803ed29SChristoph Hellwig might_sleep(); 194172210662SUladzislau Rezki (Sony) 194272210662SUladzislau Rezki (Sony) /* 194372210662SUladzislau Rezki (Sony) * If a VA is obtained from the global heap (i.e. if it fails here), 194472210662SUladzislau Rezki (Sony) * it is still marked with this "vn_id" so it is returned 194572210662SUladzislau Rezki (Sony) * to this node's pool later. This gives us the possibility 194672210662SUladzislau Rezki (Sony) * to populate pools based on user demand. 194772210662SUladzislau Rezki (Sony) * 194872210662SUladzislau Rezki (Sony) * On success a ready to go VA is returned. 194972210662SUladzislau Rezki (Sony) */ 195072210662SUladzislau Rezki (Sony) va = node_alloc(size, align, vstart, vend, &addr, &vn_id); 195172210662SUladzislau Rezki (Sony) if (!va) { 1952f07116d7SUladzislau Rezki (Sony) gfp_mask = gfp_mask & GFP_RECLAIM_MASK; 19534da56b99SChris Wilson 1954f07116d7SUladzislau Rezki (Sony) va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); 1955db64fe02SNick Piggin if (unlikely(!va)) 1956db64fe02SNick Piggin return ERR_PTR(-ENOMEM); 1957db64fe02SNick Piggin 19587f88f88fSCatalin Marinas /* 19597f88f88fSCatalin Marinas * Only scan the relevant parts containing pointers to other objects 19607f88f88fSCatalin Marinas * to avoid false negatives. 19617f88f88fSCatalin Marinas */ 1962f07116d7SUladzislau Rezki (Sony) kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); 196396aa8437SUladzislau Rezki (Sony) } 19647f88f88fSCatalin Marinas 1965db64fe02SNick Piggin retry: 196672210662SUladzislau Rezki (Sony) if (addr == vend) { 1967187f8cc4SUladzislau Rezki (Sony) preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node); 1968f9863be4SUladzislau Rezki (Sony) addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list, 1969f9863be4SUladzislau Rezki (Sony) size, align, vstart, vend); 1970187f8cc4SUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 197172210662SUladzislau Rezki (Sony) } 197268ad4a33SUladzislau Rezki (Sony) 1973cf243da6SUladzislau Rezki (Sony) trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend); 1974cf243da6SUladzislau Rezki (Sony) 197589699605SNick Piggin /* 197668ad4a33SUladzislau Rezki (Sony) * If an allocation fails, the "vend" address is 197768ad4a33SUladzislau Rezki (Sony) * returned. Therefore trigger the overflow path.
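* The overflow path purges lazily-freed areas, notifies the vmap_notify_list chain and retries; see the "overflow" label below.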
197889699605SNick Piggin */ 197968ad4a33SUladzislau Rezki (Sony) if (unlikely(addr == vend)) 198089699605SNick Piggin goto overflow; 198189699605SNick Piggin 198289699605SNick Piggin va->va_start = addr; 198389699605SNick Piggin va->va_end = addr + size; 1984688fcbfcSPengfei Li va->vm = NULL; 198572210662SUladzislau Rezki (Sony) va->flags = (va_flags | vn_id); 198668ad4a33SUladzislau Rezki (Sony) 1987d0936029SUladzislau Rezki (Sony) vn = addr_to_node(va->va_start); 1988d0936029SUladzislau Rezki (Sony) 1989d0936029SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock); 1990d0936029SUladzislau Rezki (Sony) insert_vmap_area(va, &vn->busy.root, &vn->busy.head); 1991d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 199289699605SNick Piggin 199361e16557SWang Xiaoqiang BUG_ON(!IS_ALIGNED(va->va_start, align)); 199489699605SNick Piggin BUG_ON(va->va_start < vstart); 199589699605SNick Piggin BUG_ON(va->va_end > vend); 199689699605SNick Piggin 1997d98c9e83SAndrey Ryabinin ret = kasan_populate_vmalloc(addr, size); 1998d98c9e83SAndrey Ryabinin if (ret) { 1999d98c9e83SAndrey Ryabinin free_vmap_area(va); 2000d98c9e83SAndrey Ryabinin return ERR_PTR(ret); 2001d98c9e83SAndrey Ryabinin } 2002d98c9e83SAndrey Ryabinin 200389699605SNick Piggin return va; 200489699605SNick Piggin 20057766970cSNick Piggin overflow: 2006db64fe02SNick Piggin if (!purged) { 200777e50af0SThomas Gleixner reclaim_and_purge_vmap_areas(); 2008db64fe02SNick Piggin purged = 1; 2009db64fe02SNick Piggin goto retry; 2010db64fe02SNick Piggin } 20114da56b99SChris Wilson 201212e376a6SUladzislau Rezki (Sony) freed = 0; 20134da56b99SChris Wilson blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); 201412e376a6SUladzislau Rezki (Sony) 20154da56b99SChris Wilson if (freed > 0) { 20164da56b99SChris Wilson purged = 0; 20174da56b99SChris Wilson goto retry; 20184da56b99SChris Wilson } 20194da56b99SChris Wilson 202003497d76SFlorian Fainelli if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) 2021756a025fSJoe Perches pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n", 2022756a025fSJoe Perches size); 202368ad4a33SUladzislau Rezki (Sony) 202468ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va); 2025db64fe02SNick Piggin return ERR_PTR(-EBUSY); 2026db64fe02SNick Piggin } 2027db64fe02SNick Piggin 20284da56b99SChris Wilson int register_vmap_purge_notifier(struct notifier_block *nb) 20294da56b99SChris Wilson { 20304da56b99SChris Wilson return blocking_notifier_chain_register(&vmap_notify_list, nb); 20314da56b99SChris Wilson } 20324da56b99SChris Wilson EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); 20334da56b99SChris Wilson 20344da56b99SChris Wilson int unregister_vmap_purge_notifier(struct notifier_block *nb) 20354da56b99SChris Wilson { 20364da56b99SChris Wilson return blocking_notifier_chain_unregister(&vmap_notify_list, nb); 20374da56b99SChris Wilson } 20384da56b99SChris Wilson EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); 20394da56b99SChris Wilson 2040db64fe02SNick Piggin /* 2041db64fe02SNick Piggin * lazy_max_pages is the maximum amount of virtual address space we gather up 2042db64fe02SNick Piggin * before attempting to purge with a TLB flush. 2043db64fe02SNick Piggin * 2044db64fe02SNick Piggin * There is a tradeoff here: a larger number will cover more kernel page tables 2045db64fe02SNick Piggin * and take slightly longer to purge, but it will linearly reduce the number of 2046db64fe02SNick Piggin * global TLB flushes that must be performed. 
It would seem natural to scale 2047db64fe02SNick Piggin * this number up linearly with the number of CPUs (because vmapping activity 2048db64fe02SNick Piggin * could also scale linearly with the number of CPUs), however it is likely 2049db64fe02SNick Piggin * that in practice, workloads might be constrained in other ways that mean 2050db64fe02SNick Piggin * vmap activity will not scale linearly with CPUs. Also, I want to be 2051db64fe02SNick Piggin * conservative and not introduce a big latency on huge systems, so go with 2052db64fe02SNick Piggin * a less aggressive log scale. It will still be an improvement over the old 2053db64fe02SNick Piggin * code, and it will be simple to change the scale factor if we find that it 2054db64fe02SNick Piggin * becomes a problem on bigger systems. 2055db64fe02SNick Piggin */ 2056db64fe02SNick Piggin static unsigned long lazy_max_pages(void) 2057db64fe02SNick Piggin { 2058db64fe02SNick Piggin unsigned int log; 2059db64fe02SNick Piggin 2060db64fe02SNick Piggin log = fls(num_online_cpus()); 2061db64fe02SNick Piggin 2062db64fe02SNick Piggin return log * (32UL * 1024 * 1024 / PAGE_SIZE); 2063db64fe02SNick Piggin } 2064db64fe02SNick Piggin 20654d36e6f8SUladzislau Rezki (Sony) static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0); 2066db64fe02SNick Piggin 20670574ecd1SChristoph Hellwig /* 2068f0953a1bSIngo Molnar * Serialize vmap purging. There is no actual critical section protected 2069153090f2SBaoquan He * by this lock, but we want to avoid concurrent calls for performance 20700574ecd1SChristoph Hellwig * reasons and to make the pcpu_get_vm_areas more deterministic. 20710574ecd1SChristoph Hellwig */ 2072f9e09977SChristoph Hellwig static DEFINE_MUTEX(vmap_purge_lock); 20730574ecd1SChristoph Hellwig 207402b709dfSNick Piggin /* for per-CPU blocks */ 207502b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void); 2076282631cbSUladzislau Rezki (Sony) static cpumask_t purge_nodes; 207702b709dfSNick Piggin 207872210662SUladzislau Rezki (Sony) static void 207972210662SUladzislau Rezki (Sony) reclaim_list_global(struct list_head *head) 2080db64fe02SNick Piggin { 208172210662SUladzislau Rezki (Sony) struct vmap_area *va, *n; 2082db64fe02SNick Piggin 208372210662SUladzislau Rezki (Sony) if (list_empty(head)) 208472210662SUladzislau Rezki (Sony) return; 2085db64fe02SNick Piggin 2086e36176beSUladzislau Rezki (Sony) spin_lock(&free_vmap_area_lock); 208772210662SUladzislau Rezki (Sony) list_for_each_entry_safe(va, n, head, list) 208872210662SUladzislau Rezki (Sony) merge_or_add_vmap_area_augment(va, 208972210662SUladzislau Rezki (Sony) &free_vmap_area_root, &free_vmap_area_list); 209072210662SUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 209172210662SUladzislau Rezki (Sony) } 209272210662SUladzislau Rezki (Sony) 209372210662SUladzislau Rezki (Sony) static void 209472210662SUladzislau Rezki (Sony) decay_va_pool_node(struct vmap_node *vn, bool full_decay) 209572210662SUladzislau Rezki (Sony) { 209672210662SUladzislau Rezki (Sony) struct vmap_area *va, *nva; 209772210662SUladzislau Rezki (Sony) struct list_head decay_list; 209872210662SUladzislau Rezki (Sony) struct rb_root decay_root; 209972210662SUladzislau Rezki (Sony) unsigned long n_decay; 210072210662SUladzislau Rezki (Sony) int i; 210172210662SUladzislau Rezki (Sony) 210272210662SUladzislau Rezki (Sony) decay_root = RB_ROOT; 210372210662SUladzislau Rezki (Sony) INIT_LIST_HEAD(&decay_list); 210472210662SUladzislau Rezki (Sony) 210572210662SUladzislau Rezki (Sony) for (i = 0; i < 
MAX_VA_SIZE_PAGES; i++) {
210672210662SUladzislau Rezki (Sony)		struct list_head tmp_list;
210772210662SUladzislau Rezki (Sony) 
210872210662SUladzislau Rezki (Sony)		if (list_empty(&vn->pool[i].head))
210972210662SUladzislau Rezki (Sony)			continue;
211072210662SUladzislau Rezki (Sony) 
211172210662SUladzislau Rezki (Sony)		INIT_LIST_HEAD(&tmp_list);
211272210662SUladzislau Rezki (Sony) 
211372210662SUladzislau Rezki (Sony)		/* Detach the pool, so no-one can access it. */
211472210662SUladzislau Rezki (Sony)		spin_lock(&vn->pool_lock);
211572210662SUladzislau Rezki (Sony)		list_replace_init(&vn->pool[i].head, &tmp_list);
211672210662SUladzislau Rezki (Sony)		spin_unlock(&vn->pool_lock);
211772210662SUladzislau Rezki (Sony) 
211872210662SUladzislau Rezki (Sony)		if (full_decay)
211972210662SUladzislau Rezki (Sony)			WRITE_ONCE(vn->pool[i].len, 0);
212072210662SUladzislau Rezki (Sony) 
212172210662SUladzislau Rezki (Sony)		/* Decay a pool by ~25% of its remaining objects. */
212272210662SUladzislau Rezki (Sony)		n_decay = vn->pool[i].len >> 2;
212372210662SUladzislau Rezki (Sony) 
212472210662SUladzislau Rezki (Sony)		list_for_each_entry_safe(va, nva, &tmp_list, list) {
212572210662SUladzislau Rezki (Sony)			list_del_init(&va->list);
212672210662SUladzislau Rezki (Sony)			merge_or_add_vmap_area(va, &decay_root, &decay_list);
212772210662SUladzislau Rezki (Sony) 
212872210662SUladzislau Rezki (Sony)			if (!full_decay) {
212972210662SUladzislau Rezki (Sony)				WRITE_ONCE(vn->pool[i].len, vn->pool[i].len - 1);
213072210662SUladzislau Rezki (Sony) 
213172210662SUladzislau Rezki (Sony)				if (!--n_decay)
213272210662SUladzislau Rezki (Sony)					break;
213372210662SUladzislau Rezki (Sony)			}
213472210662SUladzislau Rezki (Sony)		}
213572210662SUladzislau Rezki (Sony) 
213615e02a39SUladzislau Rezki (Sony)		/*
213715e02a39SUladzislau Rezki (Sony)		 * Attach the pool back if it has been partly decayed.
213815e02a39SUladzislau Rezki (Sony)		 * Please note, it is assumed that nobody (no other context)
213915e02a39SUladzislau Rezki (Sony)		 * can populate the pool, therefore a simple list replace
214015e02a39SUladzislau Rezki (Sony)		 * operation takes place here.
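		 *
		 * (Worked example: a pool holding 64 objects is decayed above
		 * by n_decay = 64 >> 2 == 16 areas, so the remaining 48 are
		 * re-attached here for later reuse.)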
214115e02a39SUladzislau Rezki (Sony) */ 214272210662SUladzislau Rezki (Sony) if (!full_decay && !list_empty(&tmp_list)) { 214372210662SUladzislau Rezki (Sony) spin_lock(&vn->pool_lock); 214472210662SUladzislau Rezki (Sony) list_replace_init(&tmp_list, &vn->pool[i].head); 214572210662SUladzislau Rezki (Sony) spin_unlock(&vn->pool_lock); 214672210662SUladzislau Rezki (Sony) } 214772210662SUladzislau Rezki (Sony) } 214872210662SUladzislau Rezki (Sony) 214972210662SUladzislau Rezki (Sony) reclaim_list_global(&decay_list); 215072210662SUladzislau Rezki (Sony) } 215172210662SUladzislau Rezki (Sony) 215272210662SUladzislau Rezki (Sony) static void purge_vmap_node(struct work_struct *work) 215372210662SUladzislau Rezki (Sony) { 215472210662SUladzislau Rezki (Sony) struct vmap_node *vn = container_of(work, 215572210662SUladzislau Rezki (Sony) struct vmap_node, purge_work); 215672210662SUladzislau Rezki (Sony) struct vmap_area *va, *n_va; 215772210662SUladzislau Rezki (Sony) LIST_HEAD(local_list); 215872210662SUladzislau Rezki (Sony) 215972210662SUladzislau Rezki (Sony) vn->nr_purged = 0; 216072210662SUladzislau Rezki (Sony) 2161282631cbSUladzislau Rezki (Sony) list_for_each_entry_safe(va, n_va, &vn->purge_list, list) { 21624d36e6f8SUladzislau Rezki (Sony) unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT; 21633c5c3cfbSDaniel Axtens unsigned long orig_start = va->va_start; 21643c5c3cfbSDaniel Axtens unsigned long orig_end = va->va_end; 216572210662SUladzislau Rezki (Sony) unsigned int vn_id = decode_vn_id(va->flags); 2166763b218dSJoel Fernandes 216772210662SUladzislau Rezki (Sony) list_del_init(&va->list); 21689c801f61SUladzislau Rezki (Sony) 21693c5c3cfbSDaniel Axtens if (is_vmalloc_or_module_addr((void *)orig_start)) 21703c5c3cfbSDaniel Axtens kasan_release_vmalloc(orig_start, orig_end, 21713c5c3cfbSDaniel Axtens va->va_start, va->va_end); 2172dd3b8353SUladzislau Rezki (Sony) 21734d36e6f8SUladzislau Rezki (Sony) atomic_long_sub(nr, &vmap_lazy_nr); 217472210662SUladzislau Rezki (Sony) vn->nr_purged++; 217568571be9SUladzislau Rezki (Sony) 217672210662SUladzislau Rezki (Sony) if (is_vn_id_valid(vn_id) && !vn->skip_populate) 217772210662SUladzislau Rezki (Sony) if (node_pool_add_va(vn, va)) 217872210662SUladzislau Rezki (Sony) continue; 217972210662SUladzislau Rezki (Sony) 218072210662SUladzislau Rezki (Sony) /* Go back to global. */ 218172210662SUladzislau Rezki (Sony) list_add(&va->list, &local_list); 2182763b218dSJoel Fernandes } 21836030fd5fSUladzislau Rezki (Sony) 218472210662SUladzislau Rezki (Sony) reclaim_list_global(&local_list); 2185282631cbSUladzislau Rezki (Sony) } 2186282631cbSUladzislau Rezki (Sony) 2187282631cbSUladzislau Rezki (Sony) /* 2188282631cbSUladzislau Rezki (Sony) * Purges all lazily-freed vmap areas. 
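 * Returns true if at least one area was purged, i.e. if there was
 * anything to drain.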
2189282631cbSUladzislau Rezki (Sony)  */
219072210662SUladzislau Rezki (Sony) static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
219172210662SUladzislau Rezki (Sony) 		bool full_pool_decay)
2192282631cbSUladzislau Rezki (Sony) {
219372210662SUladzislau Rezki (Sony)	unsigned long nr_purged_areas = 0;
219472210662SUladzislau Rezki (Sony)	unsigned int nr_purge_helpers;
219572210662SUladzislau Rezki (Sony)	unsigned int nr_purge_nodes;
2196282631cbSUladzislau Rezki (Sony)	struct vmap_node *vn;
2197282631cbSUladzislau Rezki (Sony)	int i;
2198282631cbSUladzislau Rezki (Sony) 
2199282631cbSUladzislau Rezki (Sony)	lockdep_assert_held(&vmap_purge_lock);
220072210662SUladzislau Rezki (Sony) 
220172210662SUladzislau Rezki (Sony)	/*
220272210662SUladzislau Rezki (Sony)	 * Use cpumask to mark which node has to be processed.
220372210662SUladzislau Rezki (Sony)	 */
2204282631cbSUladzislau Rezki (Sony)	purge_nodes = CPU_MASK_NONE;
2205282631cbSUladzislau Rezki (Sony) 
2206282631cbSUladzislau Rezki (Sony)	for (i = 0; i < nr_vmap_nodes; i++) {
2207282631cbSUladzislau Rezki (Sony)		vn = &vmap_nodes[i];
2208282631cbSUladzislau Rezki (Sony) 
2209282631cbSUladzislau Rezki (Sony)		INIT_LIST_HEAD(&vn->purge_list);
221072210662SUladzislau Rezki (Sony)		vn->skip_populate = full_pool_decay;
221172210662SUladzislau Rezki (Sony)		decay_va_pool_node(vn, full_pool_decay);
2212282631cbSUladzislau Rezki (Sony) 
2213282631cbSUladzislau Rezki (Sony)		if (RB_EMPTY_ROOT(&vn->lazy.root))
2214282631cbSUladzislau Rezki (Sony)			continue;
2215282631cbSUladzislau Rezki (Sony) 
2216282631cbSUladzislau Rezki (Sony)		spin_lock(&vn->lazy.lock);
2217282631cbSUladzislau Rezki (Sony)		WRITE_ONCE(vn->lazy.root.rb_node, NULL);
2218282631cbSUladzislau Rezki (Sony)		list_replace_init(&vn->lazy.head, &vn->purge_list);
2219282631cbSUladzislau Rezki (Sony)		spin_unlock(&vn->lazy.lock);
2220282631cbSUladzislau Rezki (Sony) 
2221282631cbSUladzislau Rezki (Sony)		start = min(start, list_first_entry(&vn->purge_list,
2222282631cbSUladzislau Rezki (Sony)			struct vmap_area, list)->va_start);
2223282631cbSUladzislau Rezki (Sony) 
2224282631cbSUladzislau Rezki (Sony)		end = max(end, list_last_entry(&vn->purge_list,
2225282631cbSUladzislau Rezki (Sony)			struct vmap_area, list)->va_end);
2226282631cbSUladzislau Rezki (Sony) 
2227282631cbSUladzislau Rezki (Sony)		cpumask_set_cpu(i, &purge_nodes);
2228282631cbSUladzislau Rezki (Sony)	}
2229282631cbSUladzislau Rezki (Sony) 
223072210662SUladzislau Rezki (Sony)	nr_purge_nodes = cpumask_weight(&purge_nodes);
223172210662SUladzislau Rezki (Sony)	if (nr_purge_nodes > 0) {
2232282631cbSUladzislau Rezki (Sony)		flush_tlb_kernel_range(start, end);
2233282631cbSUladzislau Rezki (Sony) 
223472210662SUladzislau Rezki (Sony)		/* One extra helper worker is kicked per full lazy_max_pages() set, minus one.
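		 * Example: with vmap_lazy_nr at three full lazy_max_pages()
		 * sets and eight nodes set in purge_nodes, clamp(3, 1, 8) - 1
		 * == 2 extra workers are kicked, and this context purges the
		 * remaining nodes itself.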
		 */
223572210662SUladzislau Rezki (Sony)		nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages();
223672210662SUladzislau Rezki (Sony)		nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1;
223772210662SUladzislau Rezki (Sony) 
2238282631cbSUladzislau Rezki (Sony)		for_each_cpu(i, &purge_nodes) {
223972210662SUladzislau Rezki (Sony)			vn = &vmap_nodes[i];
224072210662SUladzislau Rezki (Sony) 
224172210662SUladzislau Rezki (Sony)			if (nr_purge_helpers > 0) {
224272210662SUladzislau Rezki (Sony)				INIT_WORK(&vn->purge_work, purge_vmap_node);
224372210662SUladzislau Rezki (Sony) 
224472210662SUladzislau Rezki (Sony)				if (cpumask_test_cpu(i, cpu_online_mask))
224572210662SUladzislau Rezki (Sony)					schedule_work_on(i, &vn->purge_work);
224672210662SUladzislau Rezki (Sony)				else
224772210662SUladzislau Rezki (Sony)					schedule_work(&vn->purge_work);
224872210662SUladzislau Rezki (Sony) 
224972210662SUladzislau Rezki (Sony)				nr_purge_helpers--;
225072210662SUladzislau Rezki (Sony)			} else {
225172210662SUladzislau Rezki (Sony)				vn->purge_work.func = NULL;
225272210662SUladzislau Rezki (Sony)				purge_vmap_node(&vn->purge_work);
225372210662SUladzislau Rezki (Sony)				nr_purged_areas += vn->nr_purged;
2254282631cbSUladzislau Rezki (Sony)			}
2255282631cbSUladzislau Rezki (Sony)		}
2256282631cbSUladzislau Rezki (Sony) 
225772210662SUladzislau Rezki (Sony)		for_each_cpu(i, &purge_nodes) {
225872210662SUladzislau Rezki (Sony)			vn = &vmap_nodes[i];
225972210662SUladzislau Rezki (Sony) 
226072210662SUladzislau Rezki (Sony)			if (vn->purge_work.func) {
226172210662SUladzislau Rezki (Sony)				flush_work(&vn->purge_work);
226272210662SUladzislau Rezki (Sony)				nr_purged_areas += vn->nr_purged;
226372210662SUladzislau Rezki (Sony)			}
226472210662SUladzislau Rezki (Sony)		}
226572210662SUladzislau Rezki (Sony)	}
226672210662SUladzislau Rezki (Sony) 
226772210662SUladzislau Rezki (Sony)	trace_purge_vmap_area_lazy(start, end, nr_purged_areas);
226872210662SUladzislau Rezki (Sony)	return nr_purged_areas > 0;
2269db64fe02SNick Piggin }
2270db64fe02SNick Piggin 
2271db64fe02SNick Piggin /*
227277e50af0SThomas Gleixner  * Reclaim vmap areas by purging fragmented blocks and the lazily-freed areas.
2273db64fe02SNick Piggin  */
227477e50af0SThomas Gleixner static void reclaim_and_purge_vmap_areas(void)
227577e50af0SThomas Gleixner 
2276db64fe02SNick Piggin {
2277f9e09977SChristoph Hellwig 	mutex_lock(&vmap_purge_lock);
22780574ecd1SChristoph Hellwig 	purge_fragmented_blocks_allcpus();
227972210662SUladzislau Rezki (Sony)	__purge_vmap_area_lazy(ULONG_MAX, 0, true);
2280f9e09977SChristoph Hellwig 	mutex_unlock(&vmap_purge_lock);
2281db64fe02SNick Piggin }
2282db64fe02SNick Piggin 
2283690467c8SUladzislau Rezki (Sony) static void drain_vmap_area_work(struct work_struct *work)
2284690467c8SUladzislau Rezki (Sony) {
2285690467c8SUladzislau Rezki (Sony)	mutex_lock(&vmap_purge_lock);
228672210662SUladzislau Rezki (Sony)	__purge_vmap_area_lazy(ULONG_MAX, 0, false);
2287690467c8SUladzislau Rezki (Sony)	mutex_unlock(&vmap_purge_lock);
2288690467c8SUladzislau Rezki (Sony) }
2289690467c8SUladzislau Rezki (Sony) 
2290db64fe02SNick Piggin /*
2291edd89818SUladzislau Rezki (Sony)  * Free a vmap area. The caller must ensure that the area has been
2292edd89818SUladzislau Rezki (Sony)  * unmapped, unlinked and that flush_cache_vunmap() has been called
2293edd89818SUladzislau Rezki (Sony)  * for the correct range beforehand.
2294db64fe02SNick Piggin  */
229564141da5SJeremy Fitzhardinge static void free_vmap_area_noflush(struct vmap_area *va)
2296db64fe02SNick Piggin {
22978c4196feSUladzislau Rezki (Sony)	unsigned long nr_lazy_max = lazy_max_pages();
22988c4196feSUladzislau Rezki (Sony)	unsigned long va_start = va->va_start;
229972210662SUladzislau Rezki (Sony)	unsigned int vn_id = decode_vn_id(va->flags);
230072210662SUladzislau Rezki (Sony)	struct vmap_node *vn;
23014d36e6f8SUladzislau Rezki (Sony)	unsigned long nr_lazy;
230280c4bd7aSChris Wilson 
2303edd89818SUladzislau Rezki (Sony)	if (WARN_ON_ONCE(!list_empty(&va->list)))
2304edd89818SUladzislau Rezki (Sony)		return;
2305dd3b8353SUladzislau Rezki (Sony) 
23064d36e6f8SUladzislau Rezki (Sony)	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
23074d36e6f8SUladzislau Rezki (Sony)		PAGE_SHIFT, &vmap_lazy_nr);
230880c4bd7aSChris Wilson 
230996e2db45SUladzislau Rezki (Sony)	/*
231072210662SUladzislau Rezki (Sony)	 * If it was requested by a certain node we would like to
231172210662SUladzislau Rezki (Sony)	 * return it to that node, i.e. its pool for later reuse.
231296e2db45SUladzislau Rezki (Sony)	 */
231372210662SUladzislau Rezki (Sony)	vn = is_vn_id_valid(vn_id) ?
231472210662SUladzislau Rezki (Sony)		id_to_node(vn_id) : addr_to_node(va->va_start);
231572210662SUladzislau Rezki (Sony) 
2316282631cbSUladzislau Rezki (Sony)	spin_lock(&vn->lazy.lock);
231772210662SUladzislau Rezki (Sony)	insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head);
2318282631cbSUladzislau Rezki (Sony)	spin_unlock(&vn->lazy.lock);
231980c4bd7aSChris Wilson 
23208c4196feSUladzislau Rezki (Sony)	trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);
23218c4196feSUladzislau Rezki (Sony) 
232296e2db45SUladzislau Rezki (Sony)	/* After this point, we may free va at any time */
23238c4196feSUladzislau Rezki (Sony)	if (unlikely(nr_lazy > nr_lazy_max))
2324690467c8SUladzislau Rezki (Sony)		schedule_work(&drain_vmap_work);
2325db64fe02SNick Piggin }
2326db64fe02SNick Piggin 
2327b29acbdcSNick Piggin /*
2328b29acbdcSNick Piggin  * Free and unmap a vmap area
2329b29acbdcSNick Piggin  */
2330b29acbdcSNick Piggin static void free_unmap_vmap_area(struct vmap_area *va)
2331b29acbdcSNick Piggin {
2332b29acbdcSNick Piggin 	flush_cache_vunmap(va->va_start, va->va_end);
23334ad0ae8cSNicholas Piggin 	vunmap_range_noflush(va->va_start, va->va_end);
23348e57f8acSVlastimil Babka 	if (debug_pagealloc_enabled_static())
233582a2e924SChintan Pandya 		flush_tlb_kernel_range(va->va_start, va->va_end);
233682a2e924SChintan Pandya 
2337c8eef01eSChristoph Hellwig 	free_vmap_area_noflush(va);
2338b29acbdcSNick Piggin }
2339b29acbdcSNick Piggin 
2340993d0b28SMatthew Wilcox (Oracle) struct vmap_area *find_vmap_area(unsigned long addr)
2341db64fe02SNick Piggin {
2342d0936029SUladzislau Rezki (Sony)	struct vmap_node *vn;
2343db64fe02SNick Piggin 	struct vmap_area *va;
2344d0936029SUladzislau Rezki (Sony)	int i, j;
2345db64fe02SNick Piggin 
2346*4ed91fa9SUladzislau Rezki (Sony)	if (unlikely(!vmap_initialized))
2347*4ed91fa9SUladzislau Rezki (Sony)		return NULL;
2348*4ed91fa9SUladzislau Rezki (Sony) 
2349d0936029SUladzislau Rezki (Sony)	/*
2350d0936029SUladzislau Rezki (Sony)	 * addr_to_node_id(addr) converts an address to the index of the
2351d0936029SUladzislau Rezki (Sony)	 * node where a VA is located. If a VA spans several zones and the
2352d0936029SUladzislau Rezki (Sony)	 * passed addr is not the same as va->va_start, which is not common,
235315e02a39SUladzislau Rezki (Sony)	 * we may need to scan extra nodes. See an example:
2354d0936029SUladzislau Rezki (Sony)	 *
235515e02a39SUladzislau Rezki (Sony)	 *    <----va---->
2356d0936029SUladzislau Rezki (Sony)	 * -|-----|-----|-----|-----|-
2357d0936029SUladzislau Rezki (Sony)	 *     1     2     0     1
2358d0936029SUladzislau Rezki (Sony)	 *
235915e02a39SUladzislau Rezki (Sony)	 * The VA resides in node 1 whereas it spans nodes 1, 2 and 0. If the
236015e02a39SUladzislau Rezki (Sony)	 * passed addr is within node 2 or node 0 we should do extra work.
2361d0936029SUladzislau Rezki (Sony)	 */
2362d0936029SUladzislau Rezki (Sony)	i = j = addr_to_node_id(addr);
2363d0936029SUladzislau Rezki (Sony)	do {
2364d0936029SUladzislau Rezki (Sony)		vn = &vmap_nodes[i];
2365db64fe02SNick Piggin 
2366d0936029SUladzislau Rezki (Sony)		spin_lock(&vn->busy.lock);
2367d0936029SUladzislau Rezki (Sony)		va = __find_vmap_area(addr, &vn->busy.root);
2368d0936029SUladzislau Rezki (Sony)		spin_unlock(&vn->busy.lock);
2369d0936029SUladzislau Rezki (Sony) 
2370d0936029SUladzislau Rezki (Sony)		if (va)
2371db64fe02SNick Piggin 			return va;
2372d0936029SUladzislau Rezki (Sony)	} while ((i = (i + 1) % nr_vmap_nodes) != j);
2373d0936029SUladzislau Rezki (Sony) 
2374d0936029SUladzislau Rezki (Sony)	return NULL;
2375db64fe02SNick Piggin }
2376db64fe02SNick Piggin 
2377edd89818SUladzislau Rezki (Sony) static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
2378edd89818SUladzislau Rezki (Sony) {
2379d0936029SUladzislau Rezki (Sony)	struct vmap_node *vn;
2380edd89818SUladzislau Rezki (Sony)	struct vmap_area *va;
2381d0936029SUladzislau Rezki (Sony)	int i, j;
2382edd89818SUladzislau Rezki (Sony) 
238315e02a39SUladzislau Rezki (Sony)	/*
238415e02a39SUladzislau Rezki (Sony)	 * Check the comment in find_vmap_area() about the loop.
238515e02a39SUladzislau Rezki (Sony)	 */
2386d0936029SUladzislau Rezki (Sony)	i = j = addr_to_node_id(addr);
2387d0936029SUladzislau Rezki (Sony)	do {
2388d0936029SUladzislau Rezki (Sony)		vn = &vmap_nodes[i];
2389d0936029SUladzislau Rezki (Sony) 
2390d0936029SUladzislau Rezki (Sony)		spin_lock(&vn->busy.lock);
2391d0936029SUladzislau Rezki (Sony)		va = __find_vmap_area(addr, &vn->busy.root);
2392edd89818SUladzislau Rezki (Sony)		if (va)
2393d0936029SUladzislau Rezki (Sony)			unlink_va(va, &vn->busy.root);
2394d0936029SUladzislau Rezki (Sony)		spin_unlock(&vn->busy.lock);
2395edd89818SUladzislau Rezki (Sony) 
2396d0936029SUladzislau Rezki (Sony)		if (va)
2397edd89818SUladzislau Rezki (Sony)			return va;
2398d0936029SUladzislau Rezki (Sony)	} while ((i = (i + 1) % nr_vmap_nodes) != j);
2399d0936029SUladzislau Rezki (Sony) 
2400d0936029SUladzislau Rezki (Sony)	return NULL;
2401edd89818SUladzislau Rezki (Sony) }
2402edd89818SUladzislau Rezki (Sony) 
2403db64fe02SNick Piggin /*** Per cpu kva allocator ***/
2404db64fe02SNick Piggin 
2405db64fe02SNick Piggin /*
2406db64fe02SNick Piggin  * vmap space is limited especially on 32 bit architectures. Ensure there is
2407db64fe02SNick Piggin  * room for at least 16 percpu vmap blocks per CPU.
2408db64fe02SNick Piggin  */
2409db64fe02SNick Piggin /*
2410db64fe02SNick Piggin  * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
2411db64fe02SNick Piggin  * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
2412db64fe02SNick Piggin  * instead (we just need a rough idea)
2413db64fe02SNick Piggin  */
2414db64fe02SNick Piggin #if BITS_PER_LONG == 32
2415db64fe02SNick Piggin #define VMALLOC_SPACE		(128UL*1024*1024)
2416db64fe02SNick Piggin #else
2417db64fe02SNick Piggin #define VMALLOC_SPACE		(128UL*1024*1024*1024)
2418db64fe02SNick Piggin #endif
2419db64fe02SNick Piggin 
2420db64fe02SNick Piggin #define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
2421db64fe02SNick Piggin #define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
2422db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
2423db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
2424db64fe02SNick Piggin #define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
2425db64fe02SNick Piggin #define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
2426f982f915SClemens Ladisch #define VMAP_BBMAP_BITS		\
2427f982f915SClemens Ladisch 		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
2428db64fe02SNick Piggin 		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
2429f982f915SClemens Ladisch 			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
2430db64fe02SNick Piggin 
2431db64fe02SNick Piggin #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
2432db64fe02SNick Piggin 
243377e50af0SThomas Gleixner /*
243477e50af0SThomas Gleixner  * Purge threshold to prevent overeager purging of fragmented blocks for
243577e50af0SThomas Gleixner  * regular operations: Purge if vb->free is less than 1/4 of the capacity.
243677e50af0SThomas Gleixner  */
243777e50af0SThomas Gleixner #define VMAP_PURGE_THRESHOLD	(VMAP_BBMAP_BITS / 4)
243877e50af0SThomas Gleixner 
2439869176a0SBaoquan He #define VMAP_RAM		0x1 /* indicates a vm_map_ram area */
2440869176a0SBaoquan He #define VMAP_BLOCK		0x2 /* marks out the vmap_block sub-type */
2441869176a0SBaoquan He #define VMAP_FLAGS_MASK		0x3
2442869176a0SBaoquan He 
2443db64fe02SNick Piggin struct vmap_block_queue {
2444db64fe02SNick Piggin 	spinlock_t lock;
2445db64fe02SNick Piggin 	struct list_head free;
2446062eacf5SUladzislau Rezki (Sony) 
2447062eacf5SUladzislau Rezki (Sony)	/*
2448062eacf5SUladzislau Rezki (Sony)	 * An xarray requires extra memory to be allocated
2449062eacf5SUladzislau Rezki (Sony)	 * dynamically. If that is an issue, we can use an
2450062eacf5SUladzislau Rezki (Sony)	 * rb-tree instead.
2451062eacf5SUladzislau Rezki (Sony)	 */
2452062eacf5SUladzislau Rezki (Sony)	struct xarray vmap_blocks;
2453db64fe02SNick Piggin };
2454db64fe02SNick Piggin 
2455db64fe02SNick Piggin struct vmap_block {
2456db64fe02SNick Piggin 	spinlock_t lock;
2457db64fe02SNick Piggin 	struct vmap_area *va;
2458db64fe02SNick Piggin 	unsigned long free, dirty;
2459d76f9954SBaoquan He 	DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
24607d61bfe8SRoman Pen 	unsigned long dirty_min, dirty_max; /*< dirty range */
2461db64fe02SNick Piggin 	struct list_head free_list;
2462db64fe02SNick Piggin 	struct rcu_head rcu_head;
246302b709dfSNick Piggin 	struct list_head purge;
2464db64fe02SNick Piggin };
2465db64fe02SNick Piggin 
2466db64fe02SNick Piggin /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
2467db64fe02SNick Piggin static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
2468db64fe02SNick Piggin 
2469db64fe02SNick Piggin /*
2470062eacf5SUladzislau Rezki (Sony)  * In order to get fast access to any "vmap_block" associated with a
2471062eacf5SUladzislau Rezki (Sony)  * specific address, we use a hash.
2472062eacf5SUladzislau Rezki (Sony)  *
2473062eacf5SUladzislau Rezki (Sony)  * A per-cpu vmap_block_queue is used in two ways: to serialize
2474062eacf5SUladzislau Rezki (Sony)  * access to the free block chains among CPUs (alloc path) and it
2475062eacf5SUladzislau Rezki (Sony)  * also acts as a vmap_block hash (alloc/free paths). That means we
2476062eacf5SUladzislau Rezki (Sony)  * overload it, since we already have the per-cpu array which is
2477062eacf5SUladzislau Rezki (Sony)  * used as a hash table. When used as a hash, a 'cpu' passed to
2478062eacf5SUladzislau Rezki (Sony)  * per_cpu() is not actually a CPU but rather a hash index.
2479062eacf5SUladzislau Rezki (Sony)  *
2480fa1c77c1SUladzislau Rezki (Sony)  * The hash function is addr_to_vb_xa(), which hashes any address
2481062eacf5SUladzislau Rezki (Sony)  * to the specific index (in the hash) it belongs to. The per_cpu()
2482062eacf5SUladzislau Rezki (Sony)  * macro is then used to access the array with the generated index.
2483062eacf5SUladzislau Rezki (Sony)  *
2484062eacf5SUladzislau Rezki (Sony)  * An example:
2485062eacf5SUladzislau Rezki (Sony)  *
2486062eacf5SUladzislau Rezki (Sony)  *  CPU_1  CPU_2  CPU_0
2487062eacf5SUladzislau Rezki (Sony)  *    |      |      |
2488062eacf5SUladzislau Rezki (Sony)  *    V      V      V
2489062eacf5SUladzislau Rezki (Sony)  * 0     10     20     30     40     50     60
2490062eacf5SUladzislau Rezki (Sony)  * |------|------|------|------|------|------|...<vmap address space>
2491062eacf5SUladzislau Rezki (Sony)  *   CPU0   CPU1   CPU2   CPU0   CPU1   CPU2
2492062eacf5SUladzislau Rezki (Sony)  *
2493062eacf5SUladzislau Rezki (Sony)  * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to CPU0 zone, thus
2494062eacf5SUladzislau Rezki (Sony)  *   it accesses: CPU0/INDEX0 -> vmap_blocks -> xa_lock;
2495062eacf5SUladzislau Rezki (Sony)  *
2496062eacf5SUladzislau Rezki (Sony)  * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to CPU1 zone, thus
2497062eacf5SUladzislau Rezki (Sony)  *   it accesses: CPU1/INDEX1 -> vmap_blocks -> xa_lock;
2498062eacf5SUladzislau Rezki (Sony)  *
2499062eacf5SUladzislau Rezki (Sony)  * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to CPU2 zone, thus
2500062eacf5SUladzislau Rezki (Sony)  *   it accesses: CPU2/INDEX2 -> vmap_blocks -> xa_lock.
2501062eacf5SUladzislau Rezki (Sony)  *
2502062eacf5SUladzislau Rezki (Sony)  * This technique almost always avoids lock contention on insert/remove,
2503062eacf5SUladzislau Rezki (Sony)  * however the xarray spinlocks protect against any contention that remains.
2504db64fe02SNick Piggin  */
2505062eacf5SUladzislau Rezki (Sony) static struct xarray *
2506fa1c77c1SUladzislau Rezki (Sony) addr_to_vb_xa(unsigned long addr)
2507062eacf5SUladzislau Rezki (Sony) {
2508062eacf5SUladzislau Rezki (Sony)	int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus();
2509062eacf5SUladzislau Rezki (Sony) 
2510062eacf5SUladzislau Rezki (Sony)	return &per_cpu(vmap_block_queue, index).vmap_blocks;
2511062eacf5SUladzislau Rezki (Sony) }
2512db64fe02SNick Piggin 
2513db64fe02SNick Piggin /*
2514db64fe02SNick Piggin  * We should probably have a fallback mechanism to allocate virtual memory
2515db64fe02SNick Piggin  * out of partially filled vmap blocks. However vmap block sizing should be
2516db64fe02SNick Piggin  * fairly reasonable according to the vmalloc size, so it shouldn't be a
2517db64fe02SNick Piggin  * big problem.
2518db64fe02SNick Piggin  */
2519db64fe02SNick Piggin 
2520db64fe02SNick Piggin static unsigned long addr_to_vb_idx(unsigned long addr)
2521db64fe02SNick Piggin {
2522db64fe02SNick Piggin 	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
2523db64fe02SNick Piggin 	addr /= VMAP_BLOCK_SIZE;
2524db64fe02SNick Piggin 	return addr;
2525db64fe02SNick Piggin }
2526db64fe02SNick Piggin 
2527cf725ce2SRoman Pen static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
2528cf725ce2SRoman Pen {
2529cf725ce2SRoman Pen 	unsigned long addr;
2530cf725ce2SRoman Pen 
2531cf725ce2SRoman Pen 	addr = va_start + (pages_off << PAGE_SHIFT);
2532cf725ce2SRoman Pen 	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
2533cf725ce2SRoman Pen 	return (void *)addr;
2534cf725ce2SRoman Pen }
2535cf725ce2SRoman Pen 
2536cf725ce2SRoman Pen /**
2537cf725ce2SRoman Pen  * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in
2538cf725ce2SRoman Pen  *                  this block; the number of pages can't exceed VMAP_BBMAP_BITS
2539cf725ce2SRoman Pen  * @order: how many 2^order pages should be occupied in newly allocated block
2540cf725ce2SRoman Pen  * @gfp_mask: flags for the page level allocator
2541cf725ce2SRoman Pen  *
2542a862f68aSMike Rapoport  * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
2543cf725ce2SRoman Pen  */
2544cf725ce2SRoman Pen static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
2545db64fe02SNick Piggin {
2546db64fe02SNick Piggin 	struct vmap_block_queue *vbq;
2547db64fe02SNick Piggin 	struct vmap_block *vb;
2548db64fe02SNick Piggin 	struct vmap_area *va;
2549062eacf5SUladzislau Rezki (Sony)	struct xarray *xa;
2550db64fe02SNick Piggin 	unsigned long vb_idx;
2551db64fe02SNick Piggin 	int node, err;
2552cf725ce2SRoman Pen 	void *vaddr;
2553db64fe02SNick Piggin 
2554db64fe02SNick Piggin 	node = numa_node_id();
2555db64fe02SNick Piggin 
2556db64fe02SNick Piggin 	vb = kmalloc_node(sizeof(struct vmap_block),
2557db64fe02SNick Piggin 			gfp_mask & GFP_RECLAIM_MASK, node);
2558db64fe02SNick Piggin 	if (unlikely(!vb))
2559db64fe02SNick Piggin 		return ERR_PTR(-ENOMEM);
2560db64fe02SNick Piggin 
2561db64fe02SNick Piggin 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
2562db64fe02SNick Piggin 					VMALLOC_START, VMALLOC_END,
2563869176a0SBaoquan He 					node, gfp_mask,
2564869176a0SBaoquan He 					VMAP_RAM|VMAP_BLOCK);
2565ddf9c6d4STobias Klauser 	if (IS_ERR(va)) {
2566db64fe02SNick Piggin 		kfree(vb);
2567e7d86340SJulia Lawall 		return ERR_CAST(va);
2568db64fe02SNick Piggin 	}
2569db64fe02SNick Piggin 
2570cf725ce2SRoman Pen 	vaddr = vmap_block_vaddr(va->va_start, 0);
2571db64fe02SNick Piggin 	spin_lock_init(&vb->lock);
2572db64fe02SNick Piggin 	vb->va = va;
2573cf725ce2SRoman Pen 	/* At least something should be left free */
2574cf725ce2SRoman Pen 	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
2575d76f9954SBaoquan He 	bitmap_zero(vb->used_map, VMAP_BBMAP_BITS);
2576cf725ce2SRoman Pen 	vb->free = VMAP_BBMAP_BITS - (1UL << order);
2577db64fe02SNick Piggin 	vb->dirty = 0;
25787d61bfe8SRoman Pen 	vb->dirty_min = VMAP_BBMAP_BITS;
25797d61bfe8SRoman Pen 	vb->dirty_max = 0;
2580d76f9954SBaoquan He 	bitmap_set(vb->used_map, 0, (1UL << order));
2581db64fe02SNick Piggin 	INIT_LIST_HEAD(&vb->free_list);
2582db64fe02SNick Piggin 
2583fa1c77c1SUladzislau Rezki (Sony)	xa = addr_to_vb_xa(va->va_start);
2584db64fe02SNick Piggin 	vb_idx = addr_to_vb_idx(va->va_start);
2585062eacf5SUladzislau Rezki (Sony)	err = xa_insert(xa, vb_idx, vb, gfp_mask);
25860f14599cSMatthew Wilcox (Oracle)	if (err) {
25870f14599cSMatthew Wilcox (Oracle)
kfree(vb); 25880f14599cSMatthew Wilcox (Oracle) free_vmap_area(va); 25890f14599cSMatthew Wilcox (Oracle) return ERR_PTR(err); 25900f14599cSMatthew Wilcox (Oracle) } 2591db64fe02SNick Piggin 25923f804920SSebastian Andrzej Siewior vbq = raw_cpu_ptr(&vmap_block_queue); 2593db64fe02SNick Piggin spin_lock(&vbq->lock); 259468ac546fSRoman Pen list_add_tail_rcu(&vb->free_list, &vbq->free); 2595db64fe02SNick Piggin spin_unlock(&vbq->lock); 2596db64fe02SNick Piggin 2597cf725ce2SRoman Pen return vaddr; 2598db64fe02SNick Piggin } 2599db64fe02SNick Piggin 2600db64fe02SNick Piggin static void free_vmap_block(struct vmap_block *vb) 2601db64fe02SNick Piggin { 2602d0936029SUladzislau Rezki (Sony) struct vmap_node *vn; 2603db64fe02SNick Piggin struct vmap_block *tmp; 2604062eacf5SUladzislau Rezki (Sony) struct xarray *xa; 2605db64fe02SNick Piggin 2606fa1c77c1SUladzislau Rezki (Sony) xa = addr_to_vb_xa(vb->va->va_start); 2607062eacf5SUladzislau Rezki (Sony) tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start)); 2608db64fe02SNick Piggin BUG_ON(tmp != vb); 2609db64fe02SNick Piggin 2610d0936029SUladzislau Rezki (Sony) vn = addr_to_node(vb->va->va_start); 2611d0936029SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock); 2612d0936029SUladzislau Rezki (Sony) unlink_va(vb->va, &vn->busy.root); 2613d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 2614edd89818SUladzislau Rezki (Sony) 261564141da5SJeremy Fitzhardinge free_vmap_area_noflush(vb->va); 261622a3c7d1SLai Jiangshan kfree_rcu(vb, rcu_head); 2617db64fe02SNick Piggin } 2618db64fe02SNick Piggin 2619ca5e46c3SThomas Gleixner static bool purge_fragmented_block(struct vmap_block *vb, 262077e50af0SThomas Gleixner struct vmap_block_queue *vbq, struct list_head *purge_list, 262177e50af0SThomas Gleixner bool force_purge) 262202b709dfSNick Piggin { 2623ca5e46c3SThomas Gleixner if (vb->free + vb->dirty != VMAP_BBMAP_BITS || 2624ca5e46c3SThomas Gleixner vb->dirty == VMAP_BBMAP_BITS) 2625ca5e46c3SThomas Gleixner return false; 262602b709dfSNick Piggin 262777e50af0SThomas Gleixner /* Don't overeagerly purge usable blocks unless requested */ 262877e50af0SThomas Gleixner if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD)) 262977e50af0SThomas Gleixner return false; 263077e50af0SThomas Gleixner 2631ca5e46c3SThomas Gleixner /* prevent further allocs after releasing lock */ 26327f48121eSThomas Gleixner WRITE_ONCE(vb->free, 0); 2633ca5e46c3SThomas Gleixner /* prevent purging it again */ 26347f48121eSThomas Gleixner WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS); 26357d61bfe8SRoman Pen vb->dirty_min = 0; 26367d61bfe8SRoman Pen vb->dirty_max = VMAP_BBMAP_BITS; 263702b709dfSNick Piggin spin_lock(&vbq->lock); 263802b709dfSNick Piggin list_del_rcu(&vb->free_list); 263902b709dfSNick Piggin spin_unlock(&vbq->lock); 2640ca5e46c3SThomas Gleixner list_add_tail(&vb->purge, purge_list); 2641ca5e46c3SThomas Gleixner return true; 264202b709dfSNick Piggin } 264302b709dfSNick Piggin 2644ca5e46c3SThomas Gleixner static void free_purged_blocks(struct list_head *purge_list) 2645ca5e46c3SThomas Gleixner { 2646ca5e46c3SThomas Gleixner struct vmap_block *vb, *n_vb; 2647ca5e46c3SThomas Gleixner 2648ca5e46c3SThomas Gleixner list_for_each_entry_safe(vb, n_vb, purge_list, purge) { 264902b709dfSNick Piggin list_del(&vb->purge); 265002b709dfSNick Piggin free_vmap_block(vb); 265102b709dfSNick Piggin } 265202b709dfSNick Piggin } 265302b709dfSNick Piggin 2654ca5e46c3SThomas Gleixner static void purge_fragmented_blocks(int cpu) 2655ca5e46c3SThomas Gleixner { 2656ca5e46c3SThomas Gleixner LIST_HEAD(purge); 
2657ca5e46c3SThomas Gleixner struct vmap_block *vb; 2658ca5e46c3SThomas Gleixner struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 2659ca5e46c3SThomas Gleixner 2660ca5e46c3SThomas Gleixner rcu_read_lock(); 2661ca5e46c3SThomas Gleixner list_for_each_entry_rcu(vb, &vbq->free, free_list) { 26627f48121eSThomas Gleixner unsigned long free = READ_ONCE(vb->free); 26637f48121eSThomas Gleixner unsigned long dirty = READ_ONCE(vb->dirty); 26647f48121eSThomas Gleixner 26657f48121eSThomas Gleixner if (free + dirty != VMAP_BBMAP_BITS || 26667f48121eSThomas Gleixner dirty == VMAP_BBMAP_BITS) 2667ca5e46c3SThomas Gleixner continue; 2668ca5e46c3SThomas Gleixner 2669ca5e46c3SThomas Gleixner spin_lock(&vb->lock); 267077e50af0SThomas Gleixner purge_fragmented_block(vb, vbq, &purge, true); 2671ca5e46c3SThomas Gleixner spin_unlock(&vb->lock); 2672ca5e46c3SThomas Gleixner } 2673ca5e46c3SThomas Gleixner rcu_read_unlock(); 2674ca5e46c3SThomas Gleixner free_purged_blocks(&purge); 2675ca5e46c3SThomas Gleixner } 2676ca5e46c3SThomas Gleixner 267702b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void) 267802b709dfSNick Piggin { 267902b709dfSNick Piggin int cpu; 268002b709dfSNick Piggin 268102b709dfSNick Piggin for_each_possible_cpu(cpu) 268202b709dfSNick Piggin purge_fragmented_blocks(cpu); 268302b709dfSNick Piggin } 268402b709dfSNick Piggin 2685db64fe02SNick Piggin static void *vb_alloc(unsigned long size, gfp_t gfp_mask) 2686db64fe02SNick Piggin { 2687db64fe02SNick Piggin struct vmap_block_queue *vbq; 2688db64fe02SNick Piggin struct vmap_block *vb; 2689cf725ce2SRoman Pen void *vaddr = NULL; 2690db64fe02SNick Piggin unsigned int order; 2691db64fe02SNick Piggin 2692891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 2693db64fe02SNick Piggin BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 2694aa91c4d8SJan Kara if (WARN_ON(size == 0)) { 2695aa91c4d8SJan Kara /* 2696aa91c4d8SJan Kara * Allocating 0 bytes isn't what caller wants since 2697aa91c4d8SJan Kara * get_order(0) returns funny result. Just warn and terminate 2698aa91c4d8SJan Kara * early. 
2699aa91c4d8SJan Kara */ 2700aa91c4d8SJan Kara return NULL; 2701aa91c4d8SJan Kara } 2702db64fe02SNick Piggin order = get_order(size); 2703db64fe02SNick Piggin 2704db64fe02SNick Piggin rcu_read_lock(); 27053f804920SSebastian Andrzej Siewior vbq = raw_cpu_ptr(&vmap_block_queue); 2706db64fe02SNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) { 2707cf725ce2SRoman Pen unsigned long pages_off; 2708db64fe02SNick Piggin 270943d76502SThomas Gleixner if (READ_ONCE(vb->free) < (1UL << order)) 271043d76502SThomas Gleixner continue; 271143d76502SThomas Gleixner 2712db64fe02SNick Piggin spin_lock(&vb->lock); 2713cf725ce2SRoman Pen if (vb->free < (1UL << order)) { 2714cf725ce2SRoman Pen spin_unlock(&vb->lock); 2715cf725ce2SRoman Pen continue; 2716cf725ce2SRoman Pen } 271702b709dfSNick Piggin 2718cf725ce2SRoman Pen pages_off = VMAP_BBMAP_BITS - vb->free; 2719cf725ce2SRoman Pen vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); 272043d76502SThomas Gleixner WRITE_ONCE(vb->free, vb->free - (1UL << order)); 2721d76f9954SBaoquan He bitmap_set(vb->used_map, pages_off, (1UL << order)); 2722db64fe02SNick Piggin if (vb->free == 0) { 2723db64fe02SNick Piggin spin_lock(&vbq->lock); 2724de560423SNick Piggin list_del_rcu(&vb->free_list); 2725db64fe02SNick Piggin spin_unlock(&vbq->lock); 2726db64fe02SNick Piggin } 2727cf725ce2SRoman Pen 2728db64fe02SNick Piggin spin_unlock(&vb->lock); 2729db64fe02SNick Piggin break; 2730db64fe02SNick Piggin } 273102b709dfSNick Piggin 2732db64fe02SNick Piggin rcu_read_unlock(); 2733db64fe02SNick Piggin 2734cf725ce2SRoman Pen /* Allocate new block if nothing was found */ 2735cf725ce2SRoman Pen if (!vaddr) 2736cf725ce2SRoman Pen vaddr = new_vmap_block(order, gfp_mask); 2737db64fe02SNick Piggin 2738cf725ce2SRoman Pen return vaddr; 2739db64fe02SNick Piggin } 2740db64fe02SNick Piggin 274178a0e8c4SChristoph Hellwig static void vb_free(unsigned long addr, unsigned long size) 2742db64fe02SNick Piggin { 2743db64fe02SNick Piggin unsigned long offset; 2744db64fe02SNick Piggin unsigned int order; 2745db64fe02SNick Piggin struct vmap_block *vb; 2746062eacf5SUladzislau Rezki (Sony) struct xarray *xa; 2747db64fe02SNick Piggin 2748891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 2749db64fe02SNick Piggin BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 2750b29acbdcSNick Piggin 275178a0e8c4SChristoph Hellwig flush_cache_vunmap(addr, addr + size); 2752b29acbdcSNick Piggin 2753db64fe02SNick Piggin order = get_order(size); 275478a0e8c4SChristoph Hellwig offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; 2755062eacf5SUladzislau Rezki (Sony) 2756fa1c77c1SUladzislau Rezki (Sony) xa = addr_to_vb_xa(addr); 2757062eacf5SUladzislau Rezki (Sony) vb = xa_load(xa, addr_to_vb_idx(addr)); 2758062eacf5SUladzislau Rezki (Sony) 2759d76f9954SBaoquan He spin_lock(&vb->lock); 2760d76f9954SBaoquan He bitmap_clear(vb->used_map, offset, (1UL << order)); 2761d76f9954SBaoquan He spin_unlock(&vb->lock); 2762db64fe02SNick Piggin 27634ad0ae8cSNicholas Piggin vunmap_range_noflush(addr, addr + size); 276464141da5SJeremy Fitzhardinge 27658e57f8acSVlastimil Babka if (debug_pagealloc_enabled_static()) 276678a0e8c4SChristoph Hellwig flush_tlb_kernel_range(addr, addr + size); 276782a2e924SChintan Pandya 2768db64fe02SNick Piggin spin_lock(&vb->lock); 27697d61bfe8SRoman Pen 2770a09fad96SThomas Gleixner /* Expand the not yet TLB flushed dirty range */ 27717d61bfe8SRoman Pen vb->dirty_min = min(vb->dirty_min, offset); 27727d61bfe8SRoman Pen vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); 2773d086817dSMinChan 
Kim 27747f48121eSThomas Gleixner WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order)); 2775db64fe02SNick Piggin if (vb->dirty == VMAP_BBMAP_BITS) { 2776de560423SNick Piggin BUG_ON(vb->free); 2777db64fe02SNick Piggin spin_unlock(&vb->lock); 2778db64fe02SNick Piggin free_vmap_block(vb); 2779db64fe02SNick Piggin } else 2780db64fe02SNick Piggin spin_unlock(&vb->lock); 2781db64fe02SNick Piggin } 2782db64fe02SNick Piggin 2783868b104dSRick Edgecombe static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) 2784db64fe02SNick Piggin { 2785ca5e46c3SThomas Gleixner LIST_HEAD(purge_list); 2786db64fe02SNick Piggin int cpu; 2787db64fe02SNick Piggin 27889b463334SJeremy Fitzhardinge if (unlikely(!vmap_initialized)) 27899b463334SJeremy Fitzhardinge return; 27909b463334SJeremy Fitzhardinge 2791ca5e46c3SThomas Gleixner mutex_lock(&vmap_purge_lock); 27925803ed29SChristoph Hellwig 2793db64fe02SNick Piggin for_each_possible_cpu(cpu) { 2794db64fe02SNick Piggin struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 2795db64fe02SNick Piggin struct vmap_block *vb; 2796fc1e0d98SThomas Gleixner unsigned long idx; 2797db64fe02SNick Piggin 2798db64fe02SNick Piggin rcu_read_lock(); 2799fc1e0d98SThomas Gleixner xa_for_each(&vbq->vmap_blocks, idx, vb) { 2800db64fe02SNick Piggin spin_lock(&vb->lock); 2801ca5e46c3SThomas Gleixner 2802ca5e46c3SThomas Gleixner /* 2803ca5e46c3SThomas Gleixner * Try to purge a fragmented block first. If it's 2804ca5e46c3SThomas Gleixner * not purgeable, check whether there is dirty 2805ca5e46c3SThomas Gleixner * space to be flushed. 2806ca5e46c3SThomas Gleixner */ 280777e50af0SThomas Gleixner if (!purge_fragmented_block(vb, vbq, &purge_list, false) && 2808a09fad96SThomas Gleixner vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) { 28097d61bfe8SRoman Pen unsigned long va_start = vb->va->va_start; 2810db64fe02SNick Piggin unsigned long s, e; 2811b136be5eSJoonsoo Kim 28127d61bfe8SRoman Pen s = va_start + (vb->dirty_min << PAGE_SHIFT); 28137d61bfe8SRoman Pen e = va_start + (vb->dirty_max << PAGE_SHIFT); 2814db64fe02SNick Piggin 28157d61bfe8SRoman Pen start = min(s, start); 28167d61bfe8SRoman Pen end = max(e, end); 28177d61bfe8SRoman Pen 2818a09fad96SThomas Gleixner /* Prevent that this is flushed again */ 2819a09fad96SThomas Gleixner vb->dirty_min = VMAP_BBMAP_BITS; 2820a09fad96SThomas Gleixner vb->dirty_max = 0; 2821a09fad96SThomas Gleixner 2822db64fe02SNick Piggin flush = 1; 2823db64fe02SNick Piggin } 2824db64fe02SNick Piggin spin_unlock(&vb->lock); 2825db64fe02SNick Piggin } 2826db64fe02SNick Piggin rcu_read_unlock(); 2827db64fe02SNick Piggin } 2828ca5e46c3SThomas Gleixner free_purged_blocks(&purge_list); 2829db64fe02SNick Piggin 283072210662SUladzislau Rezki (Sony) if (!__purge_vmap_area_lazy(start, end, false) && flush) 28310574ecd1SChristoph Hellwig flush_tlb_kernel_range(start, end); 2832f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock); 2833db64fe02SNick Piggin } 2834868b104dSRick Edgecombe 2835868b104dSRick Edgecombe /** 2836868b104dSRick Edgecombe * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer 2837868b104dSRick Edgecombe * 2838868b104dSRick Edgecombe * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily 2839868b104dSRick Edgecombe * to amortize TLB flushing overheads. 
What this means is that any page you 2840868b104dSRick Edgecombe * have now, may, in a former life, have been mapped into kernel virtual 2841868b104dSRick Edgecombe * address by the vmap layer and so there might be some CPUs with TLB entries 2842868b104dSRick Edgecombe * still referencing that page (additional to the regular 1:1 kernel mapping). 2843868b104dSRick Edgecombe * 2844868b104dSRick Edgecombe * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can 2845868b104dSRick Edgecombe * be sure that none of the pages we have control over will have any aliases 2846868b104dSRick Edgecombe * from the vmap layer. 2847868b104dSRick Edgecombe */ 2848868b104dSRick Edgecombe void vm_unmap_aliases(void) 2849868b104dSRick Edgecombe { 2850868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0; 2851868b104dSRick Edgecombe int flush = 0; 2852868b104dSRick Edgecombe 2853868b104dSRick Edgecombe _vm_unmap_aliases(start, end, flush); 2854868b104dSRick Edgecombe } 2855db64fe02SNick Piggin EXPORT_SYMBOL_GPL(vm_unmap_aliases); 2856db64fe02SNick Piggin 2857db64fe02SNick Piggin /** 2858db64fe02SNick Piggin * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram 2859db64fe02SNick Piggin * @mem: the pointer returned by vm_map_ram 2860db64fe02SNick Piggin * @count: the count passed to that vm_map_ram call (cannot unmap partial) 2861db64fe02SNick Piggin */ 2862db64fe02SNick Piggin void vm_unmap_ram(const void *mem, unsigned int count) 2863db64fe02SNick Piggin { 286465ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT; 28654aff1dc4SAndrey Konovalov unsigned long addr = (unsigned long)kasan_reset_tag(mem); 28669c3acf60SChristoph Hellwig struct vmap_area *va; 2867db64fe02SNick Piggin 28685803ed29SChristoph Hellwig might_sleep(); 2869db64fe02SNick Piggin BUG_ON(!addr); 2870db64fe02SNick Piggin BUG_ON(addr < VMALLOC_START); 2871db64fe02SNick Piggin BUG_ON(addr > VMALLOC_END); 2872a1c0b1a0SShawn Lin BUG_ON(!PAGE_ALIGNED(addr)); 2873db64fe02SNick Piggin 2874d98c9e83SAndrey Ryabinin kasan_poison_vmalloc(mem, size); 2875d98c9e83SAndrey Ryabinin 28769c3acf60SChristoph Hellwig if (likely(count <= VMAP_MAX_ALLOC)) { 287705e3ff95SChintan Pandya debug_check_no_locks_freed(mem, size); 287878a0e8c4SChristoph Hellwig vb_free(addr, size); 28799c3acf60SChristoph Hellwig return; 28809c3acf60SChristoph Hellwig } 28819c3acf60SChristoph Hellwig 2882edd89818SUladzislau Rezki (Sony) va = find_unlink_vmap_area(addr); 288314687619SUladzislau Rezki (Sony) if (WARN_ON_ONCE(!va)) 288414687619SUladzislau Rezki (Sony) return; 288514687619SUladzislau Rezki (Sony) 288605e3ff95SChintan Pandya debug_check_no_locks_freed((void *)va->va_start, 288705e3ff95SChintan Pandya (va->va_end - va->va_start)); 28889c3acf60SChristoph Hellwig free_unmap_vmap_area(va); 2889db64fe02SNick Piggin } 2890db64fe02SNick Piggin EXPORT_SYMBOL(vm_unmap_ram); 2891db64fe02SNick Piggin 2892db64fe02SNick Piggin /** 2893db64fe02SNick Piggin * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) 2894db64fe02SNick Piggin * @pages: an array of pointers to the pages to be mapped 2895db64fe02SNick Piggin * @count: number of pages 2896db64fe02SNick Piggin * @node: prefer to allocate data structures on this node 2897e99c97adSRandy Dunlap * 289836437638SGioh Kim * If you use this function for less than VMAP_MAX_ALLOC pages, it could be 289936437638SGioh Kim * faster than vmap so it's good. 
But if you mix long-life and short-life 290036437638SGioh Kim * objects with vm_map_ram(), it could consume lots of address space through 290136437638SGioh Kim * fragmentation (especially on a 32bit machine). You could see failures in 290236437638SGioh Kim * the end. Please use this function for short-lived objects. 290336437638SGioh Kim * 2904e99c97adSRandy Dunlap * Returns: a pointer to the address that has been mapped, or %NULL on failure 2905db64fe02SNick Piggin */ 2906d4efd79aSChristoph Hellwig void *vm_map_ram(struct page **pages, unsigned int count, int node) 2907db64fe02SNick Piggin { 290865ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT; 2909db64fe02SNick Piggin unsigned long addr; 2910db64fe02SNick Piggin void *mem; 2911db64fe02SNick Piggin 2912db64fe02SNick Piggin if (likely(count <= VMAP_MAX_ALLOC)) { 2913db64fe02SNick Piggin mem = vb_alloc(size, GFP_KERNEL); 2914db64fe02SNick Piggin if (IS_ERR(mem)) 2915db64fe02SNick Piggin return NULL; 2916db64fe02SNick Piggin addr = (unsigned long)mem; 2917db64fe02SNick Piggin } else { 2918db64fe02SNick Piggin struct vmap_area *va; 2919db64fe02SNick Piggin va = alloc_vmap_area(size, PAGE_SIZE, 2920869176a0SBaoquan He VMALLOC_START, VMALLOC_END, 2921869176a0SBaoquan He node, GFP_KERNEL, VMAP_RAM); 2922db64fe02SNick Piggin if (IS_ERR(va)) 2923db64fe02SNick Piggin return NULL; 2924db64fe02SNick Piggin 2925db64fe02SNick Piggin addr = va->va_start; 2926db64fe02SNick Piggin mem = (void *)addr; 2927db64fe02SNick Piggin } 2928d98c9e83SAndrey Ryabinin 2929b67177ecSNicholas Piggin if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, 2930b67177ecSNicholas Piggin pages, PAGE_SHIFT) < 0) { 2931db64fe02SNick Piggin vm_unmap_ram(mem, count); 2932db64fe02SNick Piggin return NULL; 2933db64fe02SNick Piggin } 2934b67177ecSNicholas Piggin 293523689e91SAndrey Konovalov /* 293623689e91SAndrey Konovalov * Mark the pages as accessible, now that they are mapped. 293723689e91SAndrey Konovalov * With hardware tag-based KASAN, marking is skipped for 293823689e91SAndrey Konovalov * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 
293923689e91SAndrey Konovalov */ 2940f6e39794SAndrey Konovalov mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL); 294119f1c3acSAndrey Konovalov 2942db64fe02SNick Piggin return mem; 2943db64fe02SNick Piggin } 2944db64fe02SNick Piggin EXPORT_SYMBOL(vm_map_ram); 2945db64fe02SNick Piggin 29464341fa45SJoonsoo Kim static struct vm_struct *vmlist __initdata; 294792eac168SMike Rapoport 2948121e6f32SNicholas Piggin static inline unsigned int vm_area_page_order(struct vm_struct *vm) 2949121e6f32SNicholas Piggin { 2950121e6f32SNicholas Piggin #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 2951121e6f32SNicholas Piggin return vm->page_order; 2952121e6f32SNicholas Piggin #else 2953121e6f32SNicholas Piggin return 0; 2954121e6f32SNicholas Piggin #endif 2955121e6f32SNicholas Piggin } 2956121e6f32SNicholas Piggin 2957121e6f32SNicholas Piggin static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order) 2958121e6f32SNicholas Piggin { 2959121e6f32SNicholas Piggin #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 2960121e6f32SNicholas Piggin vm->page_order = order; 2961121e6f32SNicholas Piggin #else 2962121e6f32SNicholas Piggin BUG_ON(order != 0); 2963121e6f32SNicholas Piggin #endif 2964121e6f32SNicholas Piggin } 2965121e6f32SNicholas Piggin 2966f0aa6617STejun Heo /** 2967be9b7335SNicolas Pitre * vm_area_add_early - add vmap area early during boot 2968be9b7335SNicolas Pitre * @vm: vm_struct to add 2969be9b7335SNicolas Pitre * 2970be9b7335SNicolas Pitre * This function is used to add fixed kernel vm area to vmlist before 2971be9b7335SNicolas Pitre * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags 2972be9b7335SNicolas Pitre * should contain proper values and the other fields should be zero. 2973be9b7335SNicolas Pitre * 2974be9b7335SNicolas Pitre * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 2975be9b7335SNicolas Pitre */ 2976be9b7335SNicolas Pitre void __init vm_area_add_early(struct vm_struct *vm) 2977be9b7335SNicolas Pitre { 2978be9b7335SNicolas Pitre struct vm_struct *tmp, **p; 2979be9b7335SNicolas Pitre 2980be9b7335SNicolas Pitre BUG_ON(vmap_initialized); 2981be9b7335SNicolas Pitre for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { 2982be9b7335SNicolas Pitre if (tmp->addr >= vm->addr) { 2983be9b7335SNicolas Pitre BUG_ON(tmp->addr < vm->addr + vm->size); 2984be9b7335SNicolas Pitre break; 2985be9b7335SNicolas Pitre } else 2986be9b7335SNicolas Pitre BUG_ON(tmp->addr + tmp->size > vm->addr); 2987be9b7335SNicolas Pitre } 2988be9b7335SNicolas Pitre vm->next = *p; 2989be9b7335SNicolas Pitre *p = vm; 2990be9b7335SNicolas Pitre } 2991be9b7335SNicolas Pitre 2992be9b7335SNicolas Pitre /** 2993f0aa6617STejun Heo * vm_area_register_early - register vmap area early during boot 2994f0aa6617STejun Heo * @vm: vm_struct to register 2995c0c0a293STejun Heo * @align: requested alignment 2996f0aa6617STejun Heo * 2997f0aa6617STejun Heo * This function is used to register kernel vm area before 2998f0aa6617STejun Heo * vmalloc_init() is called. @vm->size and @vm->flags should contain 2999f0aa6617STejun Heo * proper values on entry and other fields should be zero. On return, 3000f0aa6617STejun Heo * vm->addr contains the allocated address. 3001f0aa6617STejun Heo * 3002f0aa6617STejun Heo * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 
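 *
 * A minimal usage sketch (illustrative only; the size and flags here
 * are made-up values, not something this file prescribes):
 *
 *	static struct vm_struct early_vm;
 *
 *	early_vm.size  = SZ_1M;
 *	early_vm.flags = VM_IOREMAP;
 *	vm_area_register_early(&early_vm, PAGE_SIZE);
 *
 * On return, early_vm.addr holds the address that was reserved.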
3003f0aa6617STejun Heo */ 3004c0c0a293STejun Heo void __init vm_area_register_early(struct vm_struct *vm, size_t align) 3005f0aa6617STejun Heo { 30060eb68437SKefeng Wang unsigned long addr = ALIGN(VMALLOC_START, align); 30070eb68437SKefeng Wang struct vm_struct *cur, **p; 3008f0aa6617STejun Heo 30090eb68437SKefeng Wang BUG_ON(vmap_initialized); 3010c0c0a293STejun Heo 30110eb68437SKefeng Wang for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) { 30120eb68437SKefeng Wang if ((unsigned long)cur->addr - addr >= vm->size) 30130eb68437SKefeng Wang break; 30140eb68437SKefeng Wang addr = ALIGN((unsigned long)cur->addr + cur->size, align); 30150eb68437SKefeng Wang } 30160eb68437SKefeng Wang 30170eb68437SKefeng Wang BUG_ON(addr > VMALLOC_END - vm->size); 3018c0c0a293STejun Heo vm->addr = (void *)addr; 30190eb68437SKefeng Wang vm->next = *p; 30200eb68437SKefeng Wang *p = vm; 30213252b1d8SKefeng Wang kasan_populate_early_vm_area_shadow(vm->addr, vm->size); 3022f0aa6617STejun Heo } 3023f0aa6617STejun Heo 3024e36176beSUladzislau Rezki (Sony) static inline void setup_vmalloc_vm_locked(struct vm_struct *vm, 3025e36176beSUladzislau Rezki (Sony) struct vmap_area *va, unsigned long flags, const void *caller) 3026cf88c790STejun Heo { 3027cf88c790STejun Heo vm->flags = flags; 3028cf88c790STejun Heo vm->addr = (void *)va->va_start; 3029cf88c790STejun Heo vm->size = va->va_end - va->va_start; 3030cf88c790STejun Heo vm->caller = caller; 3031db1aecafSMinchan Kim va->vm = vm; 3032e36176beSUladzislau Rezki (Sony) } 3033e36176beSUladzislau Rezki (Sony) 3034e36176beSUladzislau Rezki (Sony) static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, 3035e36176beSUladzislau Rezki (Sony) unsigned long flags, const void *caller) 3036e36176beSUladzislau Rezki (Sony) { 3037d0936029SUladzislau Rezki (Sony) struct vmap_node *vn = addr_to_node(va->va_start); 3038d0936029SUladzislau Rezki (Sony) 3039d0936029SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock); 3040e36176beSUladzislau Rezki (Sony) setup_vmalloc_vm_locked(vm, va, flags, caller); 3041d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 3042f5252e00SMitsuo Hayasaka } 3043cf88c790STejun Heo 304420fc02b4SZhang Yanfei static void clear_vm_uninitialized_flag(struct vm_struct *vm) 3045f5252e00SMitsuo Hayasaka { 3046d4033afdSJoonsoo Kim /* 304720fc02b4SZhang Yanfei * Before removing VM_UNINITIALIZED, 3048d4033afdSJoonsoo Kim * we should make sure that vm has proper values. 3049d4033afdSJoonsoo Kim * Pair with smp_rmb() in show_numa_info(). 
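 *
 * A rough sketch of the pairing (the reader side is assumed from the
 * reference to show_numa_info() above):
 *
 *	writer					reader
 *	------					------
 *	fill vm->addr, vm->size, ...		if (vm->flags & VM_UNINITIALIZED)
 *	smp_wmb()					skip this area
 *	vm->flags &= ~VM_UNINITIALIZED		smp_rmb()
 *						read vm->addr, vm->size, ...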
3050d4033afdSJoonsoo Kim */ 3051d4033afdSJoonsoo Kim smp_wmb(); 305220fc02b4SZhang Yanfei vm->flags &= ~VM_UNINITIALIZED; 3053cf88c790STejun Heo } 3054cf88c790STejun Heo 3055db64fe02SNick Piggin static struct vm_struct *__get_vm_area_node(unsigned long size, 30567ca3027bSDaniel Axtens unsigned long align, unsigned long shift, unsigned long flags, 30577ca3027bSDaniel Axtens unsigned long start, unsigned long end, int node, 30587ca3027bSDaniel Axtens gfp_t gfp_mask, const void *caller) 3059db64fe02SNick Piggin { 30600006526dSKautuk Consul struct vmap_area *va; 3061db64fe02SNick Piggin struct vm_struct *area; 3062d98c9e83SAndrey Ryabinin unsigned long requested_size = size; 30631da177e4SLinus Torvalds 306452fd24caSGiridhar Pemmasani BUG_ON(in_interrupt()); 30657ca3027bSDaniel Axtens size = ALIGN(size, 1ul << shift); 306631be8309SOGAWA Hirofumi if (unlikely(!size)) 306731be8309SOGAWA Hirofumi return NULL; 30681da177e4SLinus Torvalds 3069252e5c6eSzijun_hu if (flags & VM_IOREMAP) 3070252e5c6eSzijun_hu align = 1ul << clamp_t(int, get_count_order_long(size), 3071252e5c6eSzijun_hu PAGE_SHIFT, IOREMAP_MAX_ORDER); 3072252e5c6eSzijun_hu 3073cf88c790STejun Heo area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); 30741da177e4SLinus Torvalds if (unlikely(!area)) 30751da177e4SLinus Torvalds return NULL; 30761da177e4SLinus Torvalds 307771394fe5SAndrey Ryabinin if (!(flags & VM_NO_GUARD)) 30781da177e4SLinus Torvalds size += PAGE_SIZE; 30791da177e4SLinus Torvalds 3080869176a0SBaoquan He va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0); 3081db64fe02SNick Piggin if (IS_ERR(va)) { 3082db64fe02SNick Piggin kfree(area); 3083db64fe02SNick Piggin return NULL; 30841da177e4SLinus Torvalds } 30851da177e4SLinus Torvalds 3086d98c9e83SAndrey Ryabinin setup_vmalloc_vm(area, va, flags, caller); 30873c5c3cfbSDaniel Axtens 308819f1c3acSAndrey Konovalov /* 308919f1c3acSAndrey Konovalov * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a 309019f1c3acSAndrey Konovalov * best-effort approach, as they can be mapped outside of vmalloc code. 309119f1c3acSAndrey Konovalov * For VM_ALLOC mappings, the pages are marked as accessible after 309219f1c3acSAndrey Konovalov * getting mapped in __vmalloc_node_range(). 309323689e91SAndrey Konovalov * With hardware tag-based KASAN, marking is skipped for 309423689e91SAndrey Konovalov * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 
309519f1c3acSAndrey Konovalov 	 */
309619f1c3acSAndrey Konovalov 	if (!(flags & VM_ALLOC))
309723689e91SAndrey Konovalov 		area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
3098f6e39794SAndrey Konovalov 						    KASAN_VMALLOC_PROT_NORMAL);
30991d96320fSAndrey Konovalov 
31001da177e4SLinus Torvalds 	return area;
31011da177e4SLinus Torvalds }
31021da177e4SLinus Torvalds 
3103c2968612SBenjamin Herrenschmidt struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
3104c2968612SBenjamin Herrenschmidt 				       unsigned long start, unsigned long end,
31055e6cafc8SMarek Szyprowski 				       const void *caller)
3106c2968612SBenjamin Herrenschmidt {
31077ca3027bSDaniel Axtens 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
31087ca3027bSDaniel Axtens 				  NUMA_NO_NODE, GFP_KERNEL, caller);
3109c2968612SBenjamin Herrenschmidt }
3110c2968612SBenjamin Herrenschmidt 
31111da177e4SLinus Torvalds /**
3112183ff22bSSimon Arlott  * get_vm_area - reserve a contiguous kernel virtual area
31131da177e4SLinus Torvalds  * @size: size of the area
31141da177e4SLinus Torvalds  * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
31151da177e4SLinus Torvalds  *
31161da177e4SLinus Torvalds  * Search an area of @size in the kernel virtual mapping area,
31171da177e4SLinus Torvalds  * and reserve it for our purposes. Returns the area descriptor
31181da177e4SLinus Torvalds  * on success or %NULL on failure.
3119a862f68aSMike Rapoport  *
3120a862f68aSMike Rapoport  * Return: the area descriptor on success or %NULL on failure.
31211da177e4SLinus Torvalds  */
31221da177e4SLinus Torvalds struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
31231da177e4SLinus Torvalds {
31247ca3027bSDaniel Axtens 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
31257ca3027bSDaniel Axtens 				  VMALLOC_START, VMALLOC_END,
312600ef2d2fSDavid Rientjes 				  NUMA_NO_NODE, GFP_KERNEL,
312700ef2d2fSDavid Rientjes 				  __builtin_return_address(0));
312823016969SChristoph Lameter }
312923016969SChristoph Lameter 
313023016969SChristoph Lameter struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
31315e6cafc8SMarek Szyprowski 				     const void *caller)
313223016969SChristoph Lameter {
31337ca3027bSDaniel Axtens 	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
31347ca3027bSDaniel Axtens 				  VMALLOC_START, VMALLOC_END,
313500ef2d2fSDavid Rientjes 				  NUMA_NO_NODE, GFP_KERNEL, caller);
31361da177e4SLinus Torvalds }
31371da177e4SLinus Torvalds 
3138e9da6e99SMarek Szyprowski /**
3139e9da6e99SMarek Szyprowski  * find_vm_area - find a continuous kernel virtual area
3140e9da6e99SMarek Szyprowski  * @addr: base address
3141e9da6e99SMarek Szyprowski  *
3142e9da6e99SMarek Szyprowski  * Search for the kernel VM area starting at @addr, and return it.
3143e9da6e99SMarek Szyprowski  * It is up to the caller to do all required locking to keep the returned
3144e9da6e99SMarek Szyprowski  * pointer valid.
3145a862f68aSMike Rapoport  *
314674640617SHui Su  * Return: the area descriptor on success or %NULL on failure.
3147e9da6e99SMarek Szyprowski */ 3148e9da6e99SMarek Szyprowski struct vm_struct *find_vm_area(const void *addr) 314983342314SNick Piggin { 3150db64fe02SNick Piggin struct vmap_area *va; 315183342314SNick Piggin 3152db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr); 3153688fcbfcSPengfei Li if (!va) 31547856dfebSAndi Kleen return NULL; 3155688fcbfcSPengfei Li 3156688fcbfcSPengfei Li return va->vm; 31577856dfebSAndi Kleen } 31587856dfebSAndi Kleen 31591da177e4SLinus Torvalds /** 3160183ff22bSSimon Arlott * remove_vm_area - find and remove a continuous kernel virtual area 31611da177e4SLinus Torvalds * @addr: base address 31621da177e4SLinus Torvalds * 31631da177e4SLinus Torvalds * Search for the kernel VM area starting at @addr, and remove it. 31641da177e4SLinus Torvalds * This function returns the found VM area, but using it is NOT safe 31657856dfebSAndi Kleen * on SMP machines, except for its size or flags. 3166a862f68aSMike Rapoport * 316774640617SHui Su * Return: the area descriptor on success or %NULL on failure. 31681da177e4SLinus Torvalds */ 3169b3bdda02SChristoph Lameter struct vm_struct *remove_vm_area(const void *addr) 31701da177e4SLinus Torvalds { 3171db64fe02SNick Piggin struct vmap_area *va; 317275c59ce7SChristoph Hellwig struct vm_struct *vm; 3173db64fe02SNick Piggin 31745803ed29SChristoph Hellwig might_sleep(); 31755803ed29SChristoph Hellwig 317617d3ef43SChristoph Hellwig if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", 317717d3ef43SChristoph Hellwig addr)) 3178db64fe02SNick Piggin return NULL; 317917d3ef43SChristoph Hellwig 318075c59ce7SChristoph Hellwig va = find_unlink_vmap_area((unsigned long)addr); 318175c59ce7SChristoph Hellwig if (!va || !va->vm) 318275c59ce7SChristoph Hellwig return NULL; 318375c59ce7SChristoph Hellwig vm = va->vm; 318417d3ef43SChristoph Hellwig 318517d3ef43SChristoph Hellwig debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm)); 318617d3ef43SChristoph Hellwig debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm)); 318775c59ce7SChristoph Hellwig kasan_free_module_shadow(vm); 318817d3ef43SChristoph Hellwig kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm)); 318917d3ef43SChristoph Hellwig 319075c59ce7SChristoph Hellwig free_unmap_vmap_area(va); 319175c59ce7SChristoph Hellwig return vm; 31921da177e4SLinus Torvalds } 31931da177e4SLinus Torvalds 3194868b104dSRick Edgecombe static inline void set_area_direct_map(const struct vm_struct *area, 3195868b104dSRick Edgecombe int (*set_direct_map)(struct page *page)) 3196868b104dSRick Edgecombe { 3197868b104dSRick Edgecombe int i; 3198868b104dSRick Edgecombe 3199121e6f32SNicholas Piggin /* HUGE_VMALLOC passes small pages to set_direct_map */ 3200868b104dSRick Edgecombe for (i = 0; i < area->nr_pages; i++) 3201868b104dSRick Edgecombe if (page_address(area->pages[i])) 3202868b104dSRick Edgecombe set_direct_map(area->pages[i]); 3203868b104dSRick Edgecombe } 3204868b104dSRick Edgecombe 32059e5fa0aeSChristoph Hellwig /* 32069e5fa0aeSChristoph Hellwig * Flush the vm mapping and reset the direct map. 
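 * Called from vfree() for VM_FLUSH_RESET_PERMS areas: the direct map
 * aliases of the backing pages are invalidated first, all aliases are
 * flushed together with the vmalloc mapping, and the default direct map
 * permissions are then restored.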
32079e5fa0aeSChristoph Hellwig */ 32089e5fa0aeSChristoph Hellwig static void vm_reset_perms(struct vm_struct *area) 3209868b104dSRick Edgecombe { 3210868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0; 3211121e6f32SNicholas Piggin unsigned int page_order = vm_area_page_order(area); 321231e67340SRick Edgecombe int flush_dmap = 0; 3213868b104dSRick Edgecombe int i; 3214868b104dSRick Edgecombe 3215868b104dSRick Edgecombe /* 32169e5fa0aeSChristoph Hellwig * Find the start and end range of the direct mappings to make sure that 3217868b104dSRick Edgecombe * the vm_unmap_aliases() flush includes the direct map. 3218868b104dSRick Edgecombe */ 3219121e6f32SNicholas Piggin for (i = 0; i < area->nr_pages; i += 1U << page_order) { 32208e41f872SRick Edgecombe unsigned long addr = (unsigned long)page_address(area->pages[i]); 32219e5fa0aeSChristoph Hellwig 32228e41f872SRick Edgecombe if (addr) { 3223121e6f32SNicholas Piggin unsigned long page_size; 3224121e6f32SNicholas Piggin 3225121e6f32SNicholas Piggin page_size = PAGE_SIZE << page_order; 3226868b104dSRick Edgecombe start = min(addr, start); 3227121e6f32SNicholas Piggin end = max(addr + page_size, end); 322831e67340SRick Edgecombe flush_dmap = 1; 3229868b104dSRick Edgecombe } 3230868b104dSRick Edgecombe } 3231868b104dSRick Edgecombe 3232868b104dSRick Edgecombe /* 3233868b104dSRick Edgecombe * Set direct map to something invalid so that it won't be cached if 3234868b104dSRick Edgecombe * there are any accesses after the TLB flush, then flush the TLB and 3235868b104dSRick Edgecombe * reset the direct map permissions to the default. 3236868b104dSRick Edgecombe */ 3237868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_invalid_noflush); 323831e67340SRick Edgecombe _vm_unmap_aliases(start, end, flush_dmap); 3239868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_default_noflush); 3240868b104dSRick Edgecombe } 3241868b104dSRick Edgecombe 3242208162f4SChristoph Hellwig static void delayed_vfree_work(struct work_struct *w) 32431da177e4SLinus Torvalds { 3244208162f4SChristoph Hellwig struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq); 3245208162f4SChristoph Hellwig struct llist_node *t, *llnode; 32461da177e4SLinus Torvalds 3247208162f4SChristoph Hellwig llist_for_each_safe(llnode, t, llist_del_all(&p->list)) 32485d3d31d6SChristoph Hellwig vfree(llnode); 3249bf22e37aSAndrey Ryabinin } 3250bf22e37aSAndrey Ryabinin 3251bf22e37aSAndrey Ryabinin /** 3252bf22e37aSAndrey Ryabinin * vfree_atomic - release memory allocated by vmalloc() 3253bf22e37aSAndrey Ryabinin * @addr: memory base address 3254bf22e37aSAndrey Ryabinin * 3255bf22e37aSAndrey Ryabinin * This one is just like vfree() but can be called in any atomic context 3256bf22e37aSAndrey Ryabinin * except NMIs. 3257bf22e37aSAndrey Ryabinin */ 3258bf22e37aSAndrey Ryabinin void vfree_atomic(const void *addr) 3259bf22e37aSAndrey Ryabinin { 326001e2e839SChristoph Hellwig struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred); 3261bf22e37aSAndrey Ryabinin 326201e2e839SChristoph Hellwig BUG_ON(in_nmi()); 3263bf22e37aSAndrey Ryabinin kmemleak_free(addr); 3264bf22e37aSAndrey Ryabinin 326501e2e839SChristoph Hellwig /* 326601e2e839SChristoph Hellwig * Use raw_cpu_ptr() because this can be called from preemptible 326701e2e839SChristoph Hellwig * context. Preemption is absolutely fine here, because the llist_add() 326801e2e839SChristoph Hellwig * implementation is lockless, so it works even if we are adding to 326901e2e839SChristoph Hellwig * another cpu's list. 
schedule_work() should be fine with this too. 327001e2e839SChristoph Hellwig */ 327101e2e839SChristoph Hellwig if (addr && llist_add((struct llist_node *)addr, &p->list)) 327201e2e839SChristoph Hellwig schedule_work(&p->wq); 3273c67dc624SRoman Penyaev } 3274c67dc624SRoman Penyaev 32751da177e4SLinus Torvalds /** 3276fa307474SMatthew Wilcox (Oracle) * vfree - Release memory allocated by vmalloc() 3277fa307474SMatthew Wilcox (Oracle) * @addr: Memory base address 32781da177e4SLinus Torvalds * 3279fa307474SMatthew Wilcox (Oracle) * Free the virtually continuous memory area starting at @addr, as obtained 3280fa307474SMatthew Wilcox (Oracle) * from one of the vmalloc() family of APIs. This will usually also free the 3281fa307474SMatthew Wilcox (Oracle) * physical memory underlying the virtual allocation, but that memory is 3282fa307474SMatthew Wilcox (Oracle) * reference counted, so it will not be freed until the last user goes away. 32831da177e4SLinus Torvalds * 3284fa307474SMatthew Wilcox (Oracle) * If @addr is NULL, no operation is performed. 328532fcfd40SAl Viro * 3286fa307474SMatthew Wilcox (Oracle) * Context: 32873ca4ea3aSAndrey Ryabinin * May sleep if called *not* from interrupt context. 3288fa307474SMatthew Wilcox (Oracle) * Must not be called in NMI context (strictly speaking, it could be 3289fa307474SMatthew Wilcox (Oracle) * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling 3290f0953a1bSIngo Molnar * conventions for vfree() arch-dependent would be a really bad idea). 32911da177e4SLinus Torvalds */ 3292b3bdda02SChristoph Lameter void vfree(const void *addr) 32931da177e4SLinus Torvalds { 329479311c1fSChristoph Hellwig struct vm_struct *vm; 329579311c1fSChristoph Hellwig int i; 329679311c1fSChristoph Hellwig 329701e2e839SChristoph Hellwig if (unlikely(in_interrupt())) { 329801e2e839SChristoph Hellwig vfree_atomic(addr); 329932fcfd40SAl Viro return; 330001e2e839SChristoph Hellwig } 330101e2e839SChristoph Hellwig 33021da177e4SLinus Torvalds BUG_ON(in_nmi()); 330389219d37SCatalin Marinas kmemleak_free(addr); 330401e2e839SChristoph Hellwig might_sleep(); 330532fcfd40SAl Viro 3306bf22e37aSAndrey Ryabinin if (!addr) 3307bf22e37aSAndrey Ryabinin return; 3308c67dc624SRoman Penyaev 330979311c1fSChristoph Hellwig vm = remove_vm_area(addr); 331079311c1fSChristoph Hellwig if (unlikely(!vm)) { 331179311c1fSChristoph Hellwig WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 331279311c1fSChristoph Hellwig addr); 331379311c1fSChristoph Hellwig return; 331479311c1fSChristoph Hellwig } 331579311c1fSChristoph Hellwig 33169e5fa0aeSChristoph Hellwig if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS)) 33179e5fa0aeSChristoph Hellwig vm_reset_perms(vm); 331879311c1fSChristoph Hellwig for (i = 0; i < vm->nr_pages; i++) { 331979311c1fSChristoph Hellwig struct page *page = vm->pages[i]; 332079311c1fSChristoph Hellwig 332179311c1fSChristoph Hellwig BUG_ON(!page); 332279311c1fSChristoph Hellwig mod_memcg_page_state(page, MEMCG_VMALLOC, -1); 332379311c1fSChristoph Hellwig /* 332479311c1fSChristoph Hellwig * High-order allocs for huge vmallocs are split, so 332579311c1fSChristoph Hellwig * can be freed as an array of order-0 allocations 332679311c1fSChristoph Hellwig */ 3327dcc1be11SLorenzo Stoakes __free_page(page); 332879311c1fSChristoph Hellwig cond_resched(); 332979311c1fSChristoph Hellwig } 333079311c1fSChristoph Hellwig atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages); 333179311c1fSChristoph Hellwig kvfree(vm->pages); 333279311c1fSChristoph Hellwig kfree(vm); 33331da177e4SLinus 
Torvalds } 33341da177e4SLinus Torvalds EXPORT_SYMBOL(vfree); 33351da177e4SLinus Torvalds 33361da177e4SLinus Torvalds /** 33371da177e4SLinus Torvalds * vunmap - release virtual mapping obtained by vmap() 33381da177e4SLinus Torvalds * @addr: memory base address 33391da177e4SLinus Torvalds * 33401da177e4SLinus Torvalds * Free the virtually contiguous memory area starting at @addr, 33411da177e4SLinus Torvalds * which was created from the page array passed to vmap(). 33421da177e4SLinus Torvalds * 334380e93effSPekka Enberg * Must not be called in interrupt context. 33441da177e4SLinus Torvalds */ 3345b3bdda02SChristoph Lameter void vunmap(const void *addr) 33461da177e4SLinus Torvalds { 334779311c1fSChristoph Hellwig struct vm_struct *vm; 334879311c1fSChristoph Hellwig 33491da177e4SLinus Torvalds BUG_ON(in_interrupt()); 335034754b69SPeter Zijlstra might_sleep(); 335179311c1fSChristoph Hellwig 335279311c1fSChristoph Hellwig if (!addr) 335379311c1fSChristoph Hellwig return; 335479311c1fSChristoph Hellwig vm = remove_vm_area(addr); 335579311c1fSChristoph Hellwig if (unlikely(!vm)) { 335679311c1fSChristoph Hellwig WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n", 335779311c1fSChristoph Hellwig addr); 335879311c1fSChristoph Hellwig return; 335979311c1fSChristoph Hellwig } 336079311c1fSChristoph Hellwig kfree(vm); 33611da177e4SLinus Torvalds } 33621da177e4SLinus Torvalds EXPORT_SYMBOL(vunmap); 33631da177e4SLinus Torvalds 33641da177e4SLinus Torvalds /** 33651da177e4SLinus Torvalds * vmap - map an array of pages into virtually contiguous space 33661da177e4SLinus Torvalds * @pages: array of page pointers 33671da177e4SLinus Torvalds * @count: number of pages to map 33681da177e4SLinus Torvalds * @flags: vm_area->flags 33691da177e4SLinus Torvalds * @prot: page protection for the mapping 33701da177e4SLinus Torvalds * 3371b944afc9SChristoph Hellwig * Maps @count pages from @pages into contiguous kernel virtual space. 3372b944afc9SChristoph Hellwig * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself 3373b944afc9SChristoph Hellwig * (which must be kmalloc or vmalloc memory) and one reference per page in it 3374b944afc9SChristoph Hellwig * are transferred from the caller to vmap(), and will be freed / dropped when 3375b944afc9SChristoph Hellwig * vfree() is called on the return value. 3376a862f68aSMike Rapoport * 3377a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure 33781da177e4SLinus Torvalds */ 33791da177e4SLinus Torvalds void *vmap(struct page **pages, unsigned int count, 33801da177e4SLinus Torvalds unsigned long flags, pgprot_t prot) 33811da177e4SLinus Torvalds { 33821da177e4SLinus Torvalds struct vm_struct *area; 3383b67177ecSNicholas Piggin unsigned long addr; 338465ee03c4SGuillermo Julián Moreno unsigned long size; /* In bytes */ 33851da177e4SLinus Torvalds 338634754b69SPeter Zijlstra might_sleep(); 338734754b69SPeter Zijlstra 338837f3605eSChristoph Hellwig if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS)) 338937f3605eSChristoph Hellwig return NULL; 339037f3605eSChristoph Hellwig 3391bd1a8fb2SPeter Zijlstra /* 3392bd1a8fb2SPeter Zijlstra * Your top guard is someone else's bottom guard. Not having a top 3393bd1a8fb2SPeter Zijlstra * guard compromises someone else's mappings too.
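 * Hence VM_NO_GUARD is warned about and stripped just below instead of
 * being honoured for vmap().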
3394bd1a8fb2SPeter Zijlstra */ 3395bd1a8fb2SPeter Zijlstra if (WARN_ON_ONCE(flags & VM_NO_GUARD)) 3396bd1a8fb2SPeter Zijlstra flags &= ~VM_NO_GUARD; 3397bd1a8fb2SPeter Zijlstra 3398ca79b0c2SArun KS if (count > totalram_pages()) 33991da177e4SLinus Torvalds return NULL; 34001da177e4SLinus Torvalds 340165ee03c4SGuillermo Julián Moreno size = (unsigned long)count << PAGE_SHIFT; 340265ee03c4SGuillermo Julián Moreno area = get_vm_area_caller(size, flags, __builtin_return_address(0)); 34031da177e4SLinus Torvalds if (!area) 34041da177e4SLinus Torvalds return NULL; 340523016969SChristoph Lameter 3406b67177ecSNicholas Piggin addr = (unsigned long)area->addr; 3407b67177ecSNicholas Piggin if (vmap_pages_range(addr, addr + size, pgprot_nx(prot), 3408b67177ecSNicholas Piggin pages, PAGE_SHIFT) < 0) { 34091da177e4SLinus Torvalds vunmap(area->addr); 34101da177e4SLinus Torvalds return NULL; 34111da177e4SLinus Torvalds } 34121da177e4SLinus Torvalds 3413c22ee528SMiaohe Lin if (flags & VM_MAP_PUT_PAGES) { 3414b944afc9SChristoph Hellwig area->pages = pages; 3415c22ee528SMiaohe Lin area->nr_pages = count; 3416c22ee528SMiaohe Lin } 34171da177e4SLinus Torvalds return area->addr; 34181da177e4SLinus Torvalds } 34191da177e4SLinus Torvalds EXPORT_SYMBOL(vmap); 34201da177e4SLinus Torvalds 34213e9a9e25SChristoph Hellwig #ifdef CONFIG_VMAP_PFN 34223e9a9e25SChristoph Hellwig struct vmap_pfn_data { 34233e9a9e25SChristoph Hellwig unsigned long *pfns; 34243e9a9e25SChristoph Hellwig pgprot_t prot; 34253e9a9e25SChristoph Hellwig unsigned int idx; 34263e9a9e25SChristoph Hellwig }; 34273e9a9e25SChristoph Hellwig 34283e9a9e25SChristoph Hellwig static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private) 34293e9a9e25SChristoph Hellwig { 34303e9a9e25SChristoph Hellwig struct vmap_pfn_data *data = private; 3431b3f78e74SRyan Roberts unsigned long pfn = data->pfns[data->idx]; 3432b3f78e74SRyan Roberts pte_t ptent; 34333e9a9e25SChristoph Hellwig 3434b3f78e74SRyan Roberts if (WARN_ON_ONCE(pfn_valid(pfn))) 34353e9a9e25SChristoph Hellwig return -EINVAL; 3436b3f78e74SRyan Roberts 3437b3f78e74SRyan Roberts ptent = pte_mkspecial(pfn_pte(pfn, data->prot)); 3438b3f78e74SRyan Roberts set_pte_at(&init_mm, addr, pte, ptent); 3439b3f78e74SRyan Roberts 3440b3f78e74SRyan Roberts data->idx++; 34413e9a9e25SChristoph Hellwig return 0; 34423e9a9e25SChristoph Hellwig } 34433e9a9e25SChristoph Hellwig 34443e9a9e25SChristoph Hellwig /** 34453e9a9e25SChristoph Hellwig * vmap_pfn - map an array of PFNs into virtually contiguous space 34463e9a9e25SChristoph Hellwig * @pfns: array of PFNs 34473e9a9e25SChristoph Hellwig * @count: number of pages to map 34483e9a9e25SChristoph Hellwig * @prot: page protection for the mapping 34493e9a9e25SChristoph Hellwig * 34503e9a9e25SChristoph Hellwig * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns 34513e9a9e25SChristoph Hellwig * the start address of the mapping. 
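 *
 * Each PFN must lie outside the kernel's direct map: the per-PTE callback
 * refuses pfn_valid() PFNs and the whole mapping then fails. A minimal
 * usage sketch (hypothetical driver code; the @pfns array and its
 * lifetime are the caller's responsibility):
 *
 *	void *va = vmap_pfn(pfns, npfns, pgprot_writecombine(PAGE_KERNEL));
 *
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);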
34523e9a9e25SChristoph Hellwig */ 34533e9a9e25SChristoph Hellwig void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot) 34543e9a9e25SChristoph Hellwig { 34553e9a9e25SChristoph Hellwig struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) }; 34563e9a9e25SChristoph Hellwig struct vm_struct *area; 34573e9a9e25SChristoph Hellwig 34583e9a9e25SChristoph Hellwig area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, 34593e9a9e25SChristoph Hellwig __builtin_return_address(0)); 34603e9a9e25SChristoph Hellwig if (!area) 34613e9a9e25SChristoph Hellwig return NULL; 34623e9a9e25SChristoph Hellwig if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 34633e9a9e25SChristoph Hellwig count * PAGE_SIZE, vmap_pfn_apply, &data)) { 34643e9a9e25SChristoph Hellwig free_vm_area(area); 34653e9a9e25SChristoph Hellwig return NULL; 34663e9a9e25SChristoph Hellwig } 3467a50420c7SAlexandre Ghiti 3468a50420c7SAlexandre Ghiti flush_cache_vmap((unsigned long)area->addr, 3469a50420c7SAlexandre Ghiti (unsigned long)area->addr + count * PAGE_SIZE); 3470a50420c7SAlexandre Ghiti 34713e9a9e25SChristoph Hellwig return area->addr; 34723e9a9e25SChristoph Hellwig } 34733e9a9e25SChristoph Hellwig EXPORT_SYMBOL_GPL(vmap_pfn); 34743e9a9e25SChristoph Hellwig #endif /* CONFIG_VMAP_PFN */ 34753e9a9e25SChristoph Hellwig 347612b9f873SUladzislau Rezki static inline unsigned int 347712b9f873SUladzislau Rezki vm_area_alloc_pages(gfp_t gfp, int nid, 3478343ab817SUladzislau Rezki (Sony) unsigned int order, unsigned int nr_pages, struct page **pages) 347912b9f873SUladzislau Rezki { 348012b9f873SUladzislau Rezki unsigned int nr_allocated = 0; 3481e9c3cda4SMichal Hocko gfp_t alloc_gfp = gfp; 3482e9c3cda4SMichal Hocko bool nofail = false; 3483ffb29b1cSChen Wandun struct page *page; 3484ffb29b1cSChen Wandun int i; 348512b9f873SUladzislau Rezki 348612b9f873SUladzislau Rezki /* 348712b9f873SUladzislau Rezki * For order-0 pages we make use of bulk allocator, if 348812b9f873SUladzislau Rezki * the page array is partly or not at all populated due 348912b9f873SUladzislau Rezki * to fails, fallback to a single page allocator that is 349012b9f873SUladzislau Rezki * more permissive. 349112b9f873SUladzislau Rezki */ 3492c00b6b96SChen Wandun if (!order) { 3493e9c3cda4SMichal Hocko /* bulk allocator doesn't support nofail req. officially */ 34949376130cSMichal Hocko gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL; 34959376130cSMichal Hocko 3496343ab817SUladzislau Rezki (Sony) while (nr_allocated < nr_pages) { 3497343ab817SUladzislau Rezki (Sony) unsigned int nr, nr_pages_request; 3498343ab817SUladzislau Rezki (Sony) 3499343ab817SUladzislau Rezki (Sony) /* 3500343ab817SUladzislau Rezki (Sony) * A maximum allowed request is hard-coded and is 100 3501343ab817SUladzislau Rezki (Sony) * pages per call. That is done in order to prevent a 3502343ab817SUladzislau Rezki (Sony) * long preemption off scenario in the bulk-allocator 3503343ab817SUladzislau Rezki (Sony) * so the range is [1:100]. 3504343ab817SUladzislau Rezki (Sony) */ 3505343ab817SUladzislau Rezki (Sony) nr_pages_request = min(100U, nr_pages - nr_allocated); 3506343ab817SUladzislau Rezki (Sony) 3507c00b6b96SChen Wandun /* memory allocation should consider mempolicy, we can't 3508c00b6b96SChen Wandun * wrongly use nearest node when nid == NUMA_NO_NODE, 3509c00b6b96SChen Wandun * otherwise memory may be allocated in only one node, 351098af39d5SYixuan Cao * but mempolicy wants to alloc memory by interleaving. 
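 * E.g. a task running under an MPOL_INTERLEAVE policy expects a large
 * vmalloc to be spread round-robin over its allowed nodes rather than
 * packed onto whichever node is nearest to the allocating CPU.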
3511c00b6b96SChen Wandun */ 3512c00b6b96SChen Wandun if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE) 35139376130cSMichal Hocko nr = alloc_pages_bulk_array_mempolicy(bulk_gfp, 3514c00b6b96SChen Wandun nr_pages_request, 3515c00b6b96SChen Wandun pages + nr_allocated); 3516c00b6b96SChen Wandun 3517c00b6b96SChen Wandun else 35189376130cSMichal Hocko nr = alloc_pages_bulk_array_node(bulk_gfp, nid, 3519c00b6b96SChen Wandun nr_pages_request, 3520c00b6b96SChen Wandun pages + nr_allocated); 3521343ab817SUladzislau Rezki (Sony) 3522343ab817SUladzislau Rezki (Sony) nr_allocated += nr; 3523343ab817SUladzislau Rezki (Sony) cond_resched(); 3524343ab817SUladzislau Rezki (Sony) 3525343ab817SUladzislau Rezki (Sony) /* 3526343ab817SUladzislau Rezki (Sony) * If no pages or only some were obtained, 3527343ab817SUladzislau Rezki (Sony) * fall back to a single page allocator. 3528343ab817SUladzislau Rezki (Sony) */ 3529343ab817SUladzislau Rezki (Sony) if (nr != nr_pages_request) 3530343ab817SUladzislau Rezki (Sony) break; 3531343ab817SUladzislau Rezki (Sony) } 3532e9c3cda4SMichal Hocko } else if (gfp & __GFP_NOFAIL) { 3533e9c3cda4SMichal Hocko /* 3534e9c3cda4SMichal Hocko * Higher order nofail allocations are really expensive and 3535e9c3cda4SMichal Hocko * potentially dangerous (premature OOM, disruptive reclaim 3536e9c3cda4SMichal Hocko * and compaction etc.). 3537e9c3cda4SMichal Hocko */ 3538e9c3cda4SMichal Hocko alloc_gfp &= ~__GFP_NOFAIL; 3539e9c3cda4SMichal Hocko nofail = true; 35403b8000aeSNicholas Piggin } 354112b9f873SUladzislau Rezki 354212b9f873SUladzislau Rezki /* High-order pages or fallback path if "bulk" fails. */ 3543ffb29b1cSChen Wandun while (nr_allocated < nr_pages) { 3544dd544141SVasily Averin if (fatal_signal_pending(current)) 3545dd544141SVasily Averin break; 3546dd544141SVasily Averin 3547ffb29b1cSChen Wandun if (nid == NUMA_NO_NODE) 3548e9c3cda4SMichal Hocko page = alloc_pages(alloc_gfp, order); 3549ffb29b1cSChen Wandun else 3550e9c3cda4SMichal Hocko page = alloc_pages_node(nid, alloc_gfp, order); 3551e9c3cda4SMichal Hocko if (unlikely(!page)) { 3552e9c3cda4SMichal Hocko if (!nofail) 355312b9f873SUladzislau Rezki break; 3554e9c3cda4SMichal Hocko 3555e9c3cda4SMichal Hocko /* fall back to the zero order allocations */ 3556e9c3cda4SMichal Hocko alloc_gfp |= __GFP_NOFAIL; 3557e9c3cda4SMichal Hocko order = 0; 3558e9c3cda4SMichal Hocko continue; 3559e9c3cda4SMichal Hocko } 3560e9c3cda4SMichal Hocko 35613b8000aeSNicholas Piggin /* 35623b8000aeSNicholas Piggin * Higher order allocations must be able to be treated as 35633b8000aeSNicholas Piggin * independent small pages by callers (as they can with 35643b8000aeSNicholas Piggin * small-page vmallocs). Some drivers do their own refcounting 35653b8000aeSNicholas Piggin * on vmalloc_to_page() pages, some use page->mapping, 35663b8000aeSNicholas Piggin * page->lru, etc. 35673b8000aeSNicholas Piggin */ 35683b8000aeSNicholas Piggin if (order) 35693b8000aeSNicholas Piggin split_page(page, order); 357012b9f873SUladzislau Rezki 357112b9f873SUladzislau Rezki /* 357212b9f873SUladzislau Rezki * Careful, we allocate and map page-order pages, but 357312b9f873SUladzislau Rezki * tracking is done per PAGE_SIZE page so as to keep the 357412b9f873SUladzislau Rezki * vm_struct APIs independent of the physical/mapped size.
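 * E.g. one order-2 page fills four consecutive pages[] slots and
 * advances nr_allocated by four.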
357512b9f873SUladzislau Rezki */ 357612b9f873SUladzislau Rezki for (i = 0; i < (1U << order); i++) 357712b9f873SUladzislau Rezki pages[nr_allocated + i] = page + i; 357812b9f873SUladzislau Rezki 357912b9f873SUladzislau Rezki cond_resched(); 358012b9f873SUladzislau Rezki nr_allocated += 1U << order; 358112b9f873SUladzislau Rezki } 358212b9f873SUladzislau Rezki 358312b9f873SUladzislau Rezki return nr_allocated; 358412b9f873SUladzislau Rezki } 358512b9f873SUladzislau Rezki 3586e31d9eb5SAdrian Bunk static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 3587121e6f32SNicholas Piggin pgprot_t prot, unsigned int page_shift, 3588121e6f32SNicholas Piggin int node) 35891da177e4SLinus Torvalds { 3590930f036bSDavid Rientjes const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 35919376130cSMichal Hocko bool nofail = gfp_mask & __GFP_NOFAIL; 3592121e6f32SNicholas Piggin unsigned long addr = (unsigned long)area->addr; 3593121e6f32SNicholas Piggin unsigned long size = get_vm_area_size(area); 359434fe6537SAndrew Morton unsigned long array_size; 3595121e6f32SNicholas Piggin unsigned int nr_small_pages = size >> PAGE_SHIFT; 3596121e6f32SNicholas Piggin unsigned int page_order; 3597451769ebSMichal Hocko unsigned int flags; 3598451769ebSMichal Hocko int ret; 35991da177e4SLinus Torvalds 3600121e6f32SNicholas Piggin array_size = (unsigned long)nr_small_pages * sizeof(struct page *); 360180b1d8fdSLorenzo Stoakes 3602f255935bSChristoph Hellwig if (!(gfp_mask & (GFP_DMA | GFP_DMA32))) 3603f255935bSChristoph Hellwig gfp_mask |= __GFP_HIGHMEM; 36041da177e4SLinus Torvalds 36051da177e4SLinus Torvalds /* Please note that the recursion is strictly bounded. */ 36068757d5faSJan Kiszka if (array_size > PAGE_SIZE) { 36075c1f4e69SUladzislau Rezki (Sony) area->pages = __vmalloc_node(array_size, 1, nested_gfp, node, 3608f255935bSChristoph Hellwig area->caller); 3609286e1ea3SAndrew Morton } else { 36105c1f4e69SUladzislau Rezki (Sony) area->pages = kmalloc_node(array_size, nested_gfp, node); 3611286e1ea3SAndrew Morton } 36127ea36242SAustin Kim 36135c1f4e69SUladzislau Rezki (Sony) if (!area->pages) { 3614c3d77172SUladzislau Rezki (Sony) warn_alloc(gfp_mask, NULL, 3615f4bdfeafSUladzislau Rezki (Sony) "vmalloc error: size %lu, failed to allocate page array size %lu", 3616d70bec8cSNicholas Piggin nr_small_pages * PAGE_SIZE, array_size); 3617cd61413bSUladzislau Rezki (Sony) free_vm_area(area); 36181da177e4SLinus Torvalds return NULL; 36191da177e4SLinus Torvalds } 36201da177e4SLinus Torvalds 3621121e6f32SNicholas Piggin set_vm_area_page_order(area, page_shift - PAGE_SHIFT); 3622121e6f32SNicholas Piggin page_order = vm_area_page_order(area); 3623121e6f32SNicholas Piggin 3624c3d77172SUladzislau Rezki (Sony) area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN, 3625c3d77172SUladzislau Rezki (Sony) node, page_order, nr_small_pages, area->pages); 36265c1f4e69SUladzislau Rezki (Sony) 362797105f0aSRoman Gushchin atomic_long_add(area->nr_pages, &nr_vmalloc_pages); 36284e5aa1f4SShakeel Butt if (gfp_mask & __GFP_ACCOUNT) { 36293b8000aeSNicholas Piggin int i; 36304e5aa1f4SShakeel Butt 36313b8000aeSNicholas Piggin for (i = 0; i < area->nr_pages; i++) 36323b8000aeSNicholas Piggin mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1); 36334e5aa1f4SShakeel Butt } 36345c1f4e69SUladzislau Rezki (Sony) 36355c1f4e69SUladzislau Rezki (Sony) /* 36365c1f4e69SUladzislau Rezki (Sony) * If not enough pages were obtained to accomplish an 3637f41f036bSChristoph Hellwig * allocation request, free the ones that were obtained via vfree().
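 * (vfree() copes with a partially populated area: it frees the
 * area->nr_pages pages that do exist plus the page array itself.)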
36385c1f4e69SUladzislau Rezki (Sony) */ 36395c1f4e69SUladzislau Rezki (Sony) if (area->nr_pages != nr_small_pages) { 364095a301eeSLorenzo Stoakes /* 364195a301eeSLorenzo Stoakes * vm_area_alloc_pages() can fail due to insufficient memory but 364295a301eeSLorenzo Stoakes * also:- 364395a301eeSLorenzo Stoakes * 364495a301eeSLorenzo Stoakes * - a pending fatal signal 364595a301eeSLorenzo Stoakes * - insufficient huge page-order pages 364695a301eeSLorenzo Stoakes * 364795a301eeSLorenzo Stoakes * Since we always retry allocations at order-0 in the huge page 364895a301eeSLorenzo Stoakes * case a warning for either is spurious. 364995a301eeSLorenzo Stoakes */ 365095a301eeSLorenzo Stoakes if (!fatal_signal_pending(current) && page_order == 0) 3651c3d77172SUladzislau Rezki (Sony) warn_alloc(gfp_mask, NULL, 365295a301eeSLorenzo Stoakes "vmalloc error: size %lu, failed to allocate pages", 365395a301eeSLorenzo Stoakes area->nr_pages * PAGE_SIZE); 36541da177e4SLinus Torvalds goto fail; 36551da177e4SLinus Torvalds } 3656121e6f32SNicholas Piggin 3657451769ebSMichal Hocko /* 3658451769ebSMichal Hocko * page tables allocations ignore external gfp mask, enforce it 3659451769ebSMichal Hocko * by the scope API 3660451769ebSMichal Hocko */ 3661451769ebSMichal Hocko if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) 3662451769ebSMichal Hocko flags = memalloc_nofs_save(); 3663451769ebSMichal Hocko else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) 3664451769ebSMichal Hocko flags = memalloc_noio_save(); 3665451769ebSMichal Hocko 36669376130cSMichal Hocko do { 3667451769ebSMichal Hocko ret = vmap_pages_range(addr, addr + size, prot, area->pages, 3668451769ebSMichal Hocko page_shift); 36699376130cSMichal Hocko if (nofail && (ret < 0)) 36709376130cSMichal Hocko schedule_timeout_uninterruptible(1); 36719376130cSMichal Hocko } while (nofail && (ret < 0)); 3672451769ebSMichal Hocko 3673451769ebSMichal Hocko if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) 3674451769ebSMichal Hocko memalloc_nofs_restore(flags); 3675451769ebSMichal Hocko else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) 3676451769ebSMichal Hocko memalloc_noio_restore(flags); 3677451769ebSMichal Hocko 3678451769ebSMichal Hocko if (ret < 0) { 3679c3d77172SUladzislau Rezki (Sony) warn_alloc(gfp_mask, NULL, 3680f4bdfeafSUladzislau Rezki (Sony) "vmalloc error: size %lu, failed to map pages", 3681d70bec8cSNicholas Piggin area->nr_pages * PAGE_SIZE); 36821da177e4SLinus Torvalds goto fail; 3683d70bec8cSNicholas Piggin } 3684ed1f324cSChristoph Hellwig 36851da177e4SLinus Torvalds return area->addr; 36861da177e4SLinus Torvalds 36871da177e4SLinus Torvalds fail: 3688f41f036bSChristoph Hellwig vfree(area->addr); 36891da177e4SLinus Torvalds return NULL; 36901da177e4SLinus Torvalds } 36911da177e4SLinus Torvalds 3692d0a21265SDavid Rientjes /** 3693d0a21265SDavid Rientjes * __vmalloc_node_range - allocate virtually contiguous memory 3694d0a21265SDavid Rientjes * @size: allocation size 3695d0a21265SDavid Rientjes * @align: desired alignment 3696d0a21265SDavid Rientjes * @start: vm area range start 3697d0a21265SDavid Rientjes * @end: vm area range end 3698d0a21265SDavid Rientjes * @gfp_mask: flags for the page level allocator 3699d0a21265SDavid Rientjes * @prot: protection mask for the allocated pages 3700cb9e3c29SAndrey Ryabinin * @vm_flags: additional vm area flags (e.g. 
%VM_NO_GUARD) 370100ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 3702d0a21265SDavid Rientjes * @caller: caller's return address 3703d0a21265SDavid Rientjes * 3704d0a21265SDavid Rientjes * Allocate enough pages to cover @size from the page level 3705b7d90e7aSMichal Hocko * allocator with @gfp_mask flags. Please note that the full set of gfp 370630d3f011SMichal Hocko * flags is not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all 370730d3f011SMichal Hocko * supported. 370830d3f011SMichal Hocko * Zone modifiers are not supported. From the reclaim modifiers 370930d3f011SMichal Hocko * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported) 371030d3f011SMichal Hocko * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and 371130d3f011SMichal Hocko * __GFP_RETRY_MAYFAIL are not supported). 371230d3f011SMichal Hocko * 371330d3f011SMichal Hocko * __GFP_NOWARN can be used to suppress failure messages. 3714b7d90e7aSMichal Hocko * 3715b7d90e7aSMichal Hocko * Map them into contiguous kernel virtual space, using a pagetable 3716b7d90e7aSMichal Hocko * protection of @prot. 3717a862f68aSMike Rapoport * 3718a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure 3719d0a21265SDavid Rientjes */ 3720d0a21265SDavid Rientjes void *__vmalloc_node_range(unsigned long size, unsigned long align, 3721d0a21265SDavid Rientjes unsigned long start, unsigned long end, gfp_t gfp_mask, 3722cb9e3c29SAndrey Ryabinin pgprot_t prot, unsigned long vm_flags, int node, 3723cb9e3c29SAndrey Ryabinin const void *caller) 3724930fc45aSChristoph Lameter { 3725d0a21265SDavid Rientjes struct vm_struct *area; 372619f1c3acSAndrey Konovalov void *ret; 3727f6e39794SAndrey Konovalov kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE; 3728d0a21265SDavid Rientjes unsigned long real_size = size; 3729121e6f32SNicholas Piggin unsigned long real_align = align; 3730121e6f32SNicholas Piggin unsigned int shift = PAGE_SHIFT; 3731d0a21265SDavid Rientjes 3732d70bec8cSNicholas Piggin if (WARN_ON_ONCE(!size)) 3733d70bec8cSNicholas Piggin return NULL; 3734d70bec8cSNicholas Piggin 3735d70bec8cSNicholas Piggin if ((size >> PAGE_SHIFT) > totalram_pages()) { 3736d70bec8cSNicholas Piggin warn_alloc(gfp_mask, NULL, 3737f4bdfeafSUladzislau Rezki (Sony) "vmalloc error: size %lu, exceeds total pages", 3738f4bdfeafSUladzislau Rezki (Sony) real_size); 3739d70bec8cSNicholas Piggin return NULL; 3740121e6f32SNicholas Piggin } 3741d0a21265SDavid Rientjes 3742559089e0SSong Liu if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) { 3743121e6f32SNicholas Piggin unsigned long size_per_node; 3744121e6f32SNicholas Piggin 3745121e6f32SNicholas Piggin /* 3746121e6f32SNicholas Piggin * Try huge pages. Only try for PAGE_KERNEL allocations, 3747121e6f32SNicholas Piggin * others like modules don't yet expect huge pages in 3748121e6f32SNicholas Piggin * their allocations due to apply_to_page_range not 3749121e6f32SNicholas Piggin * supporting them.
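 * E.g. on x86-64 (PMD_SIZE == 2 MiB) a 4 MiB PAGE_KERNEL request gets
 * shift = PMD_SHIFT: align and size are rounded up to 2 MiB and the
 * area is mapped with two PMD-sized mappings. If that fails, the
 * "fail:" path below retries the whole request with 4 KiB pages.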
3750121e6f32SNicholas Piggin */ 3751121e6f32SNicholas Piggin 3752121e6f32SNicholas Piggin size_per_node = size; 3753121e6f32SNicholas Piggin if (node == NUMA_NO_NODE) 3754121e6f32SNicholas Piggin size_per_node /= num_online_nodes(); 37553382bbeeSChristophe Leroy if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE) 3756121e6f32SNicholas Piggin shift = PMD_SHIFT; 37573382bbeeSChristophe Leroy else 37583382bbeeSChristophe Leroy shift = arch_vmap_pte_supported_shift(size_per_node); 37593382bbeeSChristophe Leroy 3760121e6f32SNicholas Piggin align = max(real_align, 1UL << shift); 3761121e6f32SNicholas Piggin size = ALIGN(real_size, 1UL << shift); 3762121e6f32SNicholas Piggin } 3763121e6f32SNicholas Piggin 3764121e6f32SNicholas Piggin again: 37657ca3027bSDaniel Axtens area = __get_vm_area_node(real_size, align, shift, VM_ALLOC | 37667ca3027bSDaniel Axtens VM_UNINITIALIZED | vm_flags, start, end, node, 37677ca3027bSDaniel Axtens gfp_mask, caller); 3768d70bec8cSNicholas Piggin if (!area) { 37699376130cSMichal Hocko bool nofail = gfp_mask & __GFP_NOFAIL; 3770d70bec8cSNicholas Piggin warn_alloc(gfp_mask, NULL, 37719376130cSMichal Hocko "vmalloc error: size %lu, vm_struct allocation failed%s", 37729376130cSMichal Hocko real_size, (nofail) ? ". Retrying." : ""); 37739376130cSMichal Hocko if (nofail) { 37749376130cSMichal Hocko schedule_timeout_uninterruptible(1); 37759376130cSMichal Hocko goto again; 37769376130cSMichal Hocko } 3777de7d2b56SJoe Perches goto fail; 3778d70bec8cSNicholas Piggin } 3779d0a21265SDavid Rientjes 3780f6e39794SAndrey Konovalov /* 3781f6e39794SAndrey Konovalov * Prepare arguments for __vmalloc_area_node() and 3782f6e39794SAndrey Konovalov * kasan_unpoison_vmalloc(). 3783f6e39794SAndrey Konovalov */ 3784f6e39794SAndrey Konovalov if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) { 3785f6e39794SAndrey Konovalov if (kasan_hw_tags_enabled()) { 378601d92c7fSAndrey Konovalov /* 378701d92c7fSAndrey Konovalov * Modify protection bits to allow tagging. 3788f6e39794SAndrey Konovalov * This must be done before mapping. 378901d92c7fSAndrey Konovalov */ 379001d92c7fSAndrey Konovalov prot = arch_vmap_pgprot_tagged(prot); 379101d92c7fSAndrey Konovalov 379223689e91SAndrey Konovalov /* 3793f6e39794SAndrey Konovalov * Skip page_alloc poisoning and zeroing for physical 3794f6e39794SAndrey Konovalov * pages backing VM_ALLOC mapping. Memory is instead 3795f6e39794SAndrey Konovalov * poisoned and zeroed by kasan_unpoison_vmalloc(). 379623689e91SAndrey Konovalov */ 37970a54864fSPeter Collingbourne gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO; 379823689e91SAndrey Konovalov } 379923689e91SAndrey Konovalov 3800f6e39794SAndrey Konovalov /* Take note that the mapping is PAGE_KERNEL. */ 3801f6e39794SAndrey Konovalov kasan_flags |= KASAN_VMALLOC_PROT_NORMAL; 3802f6e39794SAndrey Konovalov } 3803f6e39794SAndrey Konovalov 380401d92c7fSAndrey Konovalov /* Allocate physical pages and map them into vmalloc space. */ 380519f1c3acSAndrey Konovalov ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node); 380619f1c3acSAndrey Konovalov if (!ret) 3807121e6f32SNicholas Piggin goto fail; 380889219d37SCatalin Marinas 380923689e91SAndrey Konovalov /* 381023689e91SAndrey Konovalov * Mark the pages as accessible, now that they are mapped. 
38116c2f761dSAndrey Konovalov * The condition for setting KASAN_VMALLOC_INIT should complement the 38126c2f761dSAndrey Konovalov * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check 38136c2f761dSAndrey Konovalov * to make sure that memory is initialized under the same conditions. 3814f6e39794SAndrey Konovalov * Tag-based KASAN modes only assign tags to normal non-executable 3815f6e39794SAndrey Konovalov * allocations, see __kasan_unpoison_vmalloc(). 381623689e91SAndrey Konovalov */ 3817f6e39794SAndrey Konovalov kasan_flags |= KASAN_VMALLOC_VM_ALLOC; 38186c2f761dSAndrey Konovalov if (!want_init_on_free() && want_init_on_alloc(gfp_mask) && 38196c2f761dSAndrey Konovalov (gfp_mask & __GFP_SKIP_ZERO)) 382023689e91SAndrey Konovalov kasan_flags |= KASAN_VMALLOC_INIT; 3821f6e39794SAndrey Konovalov /* KASAN_VMALLOC_PROT_NORMAL already set if required. */ 382223689e91SAndrey Konovalov area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags); 382319f1c3acSAndrey Konovalov 382489219d37SCatalin Marinas /* 382520fc02b4SZhang Yanfei * In this function, newly allocated vm_struct has VM_UNINITIALIZED 382620fc02b4SZhang Yanfei * flag. It means that vm_struct is not fully initialized. 38274341fa45SJoonsoo Kim * Now, it is fully initialized, so remove this flag here. 3828f5252e00SMitsuo Hayasaka */ 382920fc02b4SZhang Yanfei clear_vm_uninitialized_flag(area); 3830f5252e00SMitsuo Hayasaka 38317ca3027bSDaniel Axtens size = PAGE_ALIGN(size); 383260115fa5SKefeng Wang if (!(vm_flags & VM_DEFER_KMEMLEAK)) 383394f4a161SCatalin Marinas kmemleak_vmalloc(area, size, gfp_mask); 383489219d37SCatalin Marinas 383519f1c3acSAndrey Konovalov return area->addr; 3836de7d2b56SJoe Perches 3837de7d2b56SJoe Perches fail: 3838121e6f32SNicholas Piggin if (shift > PAGE_SHIFT) { 3839121e6f32SNicholas Piggin shift = PAGE_SHIFT; 3840121e6f32SNicholas Piggin align = real_align; 3841121e6f32SNicholas Piggin size = real_size; 3842121e6f32SNicholas Piggin goto again; 3843121e6f32SNicholas Piggin } 3844121e6f32SNicholas Piggin 3845de7d2b56SJoe Perches return NULL; 3846930fc45aSChristoph Lameter } 3847930fc45aSChristoph Lameter 38481da177e4SLinus Torvalds /** 3849930fc45aSChristoph Lameter * __vmalloc_node - allocate virtually contiguous memory 38501da177e4SLinus Torvalds * @size: allocation size 38512dca6999SDavid Miller * @align: desired alignment 38521da177e4SLinus Torvalds * @gfp_mask: flags for the page level allocator 385300ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 3854c85d194bSRandy Dunlap * @caller: caller's return address 38551da177e4SLinus Torvalds * 3856f38fcb9cSChristoph Hellwig * Allocate enough pages to cover @size from the page level allocator with 3857f38fcb9cSChristoph Hellwig * @gfp_mask flags. Map them into contiguous kernel virtual space. 3858a7c3e901SMichal Hocko * 3859dcda9b04SMichal Hocko * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL 3860a7c3e901SMichal Hocko * and __GFP_NOFAIL are not supported 3861a7c3e901SMichal Hocko * 3862a7c3e901SMichal Hocko * Any use of gfp flags outside of GFP_KERNEL should be consulted 3863a7c3e901SMichal Hocko * with mm people. 
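 *
 * A minimal sketch (hypothetical caller) of a node-local allocation that
 * is allowed to fail quietly:
 *
 *	void *buf = __vmalloc_node(size, 1, GFP_KERNEL | __GFP_NOWARN,
 *				   numa_node_id(), __builtin_return_address(0));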
3864a862f68aSMike Rapoport * 3865a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 38661da177e4SLinus Torvalds */ 38672b905948SChristoph Hellwig void *__vmalloc_node(unsigned long size, unsigned long align, 3868f38fcb9cSChristoph Hellwig gfp_t gfp_mask, int node, const void *caller) 38691da177e4SLinus Torvalds { 3870d0a21265SDavid Rientjes return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 3871f38fcb9cSChristoph Hellwig gfp_mask, PAGE_KERNEL, 0, node, caller); 38721da177e4SLinus Torvalds } 3873c3f896dcSChristoph Hellwig /* 3874c3f896dcSChristoph Hellwig * This is only for performance analysis of vmalloc and stress purpose. 3875c3f896dcSChristoph Hellwig * It is required by vmalloc test module, therefore do not use it other 3876c3f896dcSChristoph Hellwig * than that. 3877c3f896dcSChristoph Hellwig */ 3878c3f896dcSChristoph Hellwig #ifdef CONFIG_TEST_VMALLOC_MODULE 3879c3f896dcSChristoph Hellwig EXPORT_SYMBOL_GPL(__vmalloc_node); 3880c3f896dcSChristoph Hellwig #endif 38811da177e4SLinus Torvalds 388288dca4caSChristoph Hellwig void *__vmalloc(unsigned long size, gfp_t gfp_mask) 3883930fc45aSChristoph Lameter { 3884f38fcb9cSChristoph Hellwig return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE, 388523016969SChristoph Lameter __builtin_return_address(0)); 3886930fc45aSChristoph Lameter } 38871da177e4SLinus Torvalds EXPORT_SYMBOL(__vmalloc); 38881da177e4SLinus Torvalds 38891da177e4SLinus Torvalds /** 38901da177e4SLinus Torvalds * vmalloc - allocate virtually contiguous memory 38911da177e4SLinus Torvalds * @size: allocation size 389292eac168SMike Rapoport * 38931da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 38941da177e4SLinus Torvalds * allocator and map them into contiguous kernel virtual space. 38951da177e4SLinus Torvalds * 3896c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 38971da177e4SLinus Torvalds * use __vmalloc() instead. 3898a862f68aSMike Rapoport * 3899a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 39001da177e4SLinus Torvalds */ 39011da177e4SLinus Torvalds void *vmalloc(unsigned long size) 39021da177e4SLinus Torvalds { 39034d39d728SChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE, 39044d39d728SChristoph Hellwig __builtin_return_address(0)); 39051da177e4SLinus Torvalds } 39061da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc); 39071da177e4SLinus Torvalds 3908930fc45aSChristoph Lameter /** 3909559089e0SSong Liu * vmalloc_huge - allocate virtually contiguous memory, allow huge pages 391015a64f5aSClaudio Imbrenda * @size: allocation size 3911559089e0SSong Liu * @gfp_mask: flags for the page level allocator 391215a64f5aSClaudio Imbrenda * 3913559089e0SSong Liu * Allocate enough pages to cover @size from the page level 391415a64f5aSClaudio Imbrenda * allocator and map them into contiguous kernel virtual space. 
3915559089e0SSong Liu * If @size is greater than or equal to PMD_SIZE, allow using 3916559089e0SSong Liu * huge pages for the memory 391715a64f5aSClaudio Imbrenda * 391815a64f5aSClaudio Imbrenda * Return: pointer to the allocated memory or %NULL on error 391915a64f5aSClaudio Imbrenda */ 3920559089e0SSong Liu void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) 392115a64f5aSClaudio Imbrenda { 392215a64f5aSClaudio Imbrenda return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, 3923559089e0SSong Liu gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, 392415a64f5aSClaudio Imbrenda NUMA_NO_NODE, __builtin_return_address(0)); 392515a64f5aSClaudio Imbrenda } 3926559089e0SSong Liu EXPORT_SYMBOL_GPL(vmalloc_huge); 392715a64f5aSClaudio Imbrenda 392815a64f5aSClaudio Imbrenda /** 3929e1ca7788SDave Young * vzalloc - allocate virtually contiguous memory with zero fill 3930e1ca7788SDave Young * @size: allocation size 393192eac168SMike Rapoport * 3932e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 3933e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 3934e1ca7788SDave Young * The memory allocated is set to zero. 3935e1ca7788SDave Young * 3936e1ca7788SDave Young * For tight control over page level allocator and protection flags 3937e1ca7788SDave Young * use __vmalloc() instead. 3938a862f68aSMike Rapoport * 3939a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 3940e1ca7788SDave Young */ 3941e1ca7788SDave Young void *vzalloc(unsigned long size) 3942e1ca7788SDave Young { 39434d39d728SChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 39444d39d728SChristoph Hellwig __builtin_return_address(0)); 3945e1ca7788SDave Young } 3946e1ca7788SDave Young EXPORT_SYMBOL(vzalloc); 3947e1ca7788SDave Young 3948e1ca7788SDave Young /** 3949ead04089SRolf Eike Beer * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 395083342314SNick Piggin * @size: allocation size 3951ead04089SRolf Eike Beer * 3952ead04089SRolf Eike Beer * The resulting memory area is zeroed so it can be mapped to userspace 3953ead04089SRolf Eike Beer * without leaking data. 3954a862f68aSMike Rapoport * 3955a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 395683342314SNick Piggin */ 395783342314SNick Piggin void *vmalloc_user(unsigned long size) 395883342314SNick Piggin { 3959bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 3960bc84c535SRoman Penyaev GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, 3961bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE, 396200ef2d2fSDavid Rientjes __builtin_return_address(0)); 396383342314SNick Piggin } 396483342314SNick Piggin EXPORT_SYMBOL(vmalloc_user); 396583342314SNick Piggin 396683342314SNick Piggin /** 3967930fc45aSChristoph Lameter * vmalloc_node - allocate memory on a specific node 3968930fc45aSChristoph Lameter * @size: allocation size 3969d44e0780SRandy Dunlap * @node: numa node 3970930fc45aSChristoph Lameter * 3971930fc45aSChristoph Lameter * Allocate enough pages to cover @size from the page level 3972930fc45aSChristoph Lameter * allocator and map them into contiguous kernel virtual space. 3973930fc45aSChristoph Lameter * 3974c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 3975930fc45aSChristoph Lameter * use __vmalloc() instead. 
3976a862f68aSMike Rapoport * 3977a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 3978930fc45aSChristoph Lameter */ 3979930fc45aSChristoph Lameter void *vmalloc_node(unsigned long size, int node) 3980930fc45aSChristoph Lameter { 3981f38fcb9cSChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL, node, 3982f38fcb9cSChristoph Hellwig __builtin_return_address(0)); 3983930fc45aSChristoph Lameter } 3984930fc45aSChristoph Lameter EXPORT_SYMBOL(vmalloc_node); 3985930fc45aSChristoph Lameter 3986e1ca7788SDave Young /** 3987e1ca7788SDave Young * vzalloc_node - allocate memory on a specific node with zero fill 3988e1ca7788SDave Young * @size: allocation size 3989e1ca7788SDave Young * @node: numa node 3990e1ca7788SDave Young * 3991e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 3992e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 3993e1ca7788SDave Young * The memory allocated is set to zero. 3994e1ca7788SDave Young * 3995a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 3996e1ca7788SDave Young */ 3997e1ca7788SDave Young void *vzalloc_node(unsigned long size, int node) 3998e1ca7788SDave Young { 39994d39d728SChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node, 40004d39d728SChristoph Hellwig __builtin_return_address(0)); 4001e1ca7788SDave Young } 4002e1ca7788SDave Young EXPORT_SYMBOL(vzalloc_node); 4003e1ca7788SDave Young 40040d08e0d3SAndi Kleen #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 4005698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 40060d08e0d3SAndi Kleen #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 4007698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) 40080d08e0d3SAndi Kleen #else 4009698d0831SMichal Hocko /* 4010698d0831SMichal Hocko * 64b systems should always have either DMA or DMA32 zones. For others 4011698d0831SMichal Hocko * GFP_DMA32 should do the right thing and use the normal zone. 4012698d0831SMichal Hocko */ 401368d68ff6SZhiyuan Dai #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 40140d08e0d3SAndi Kleen #endif 40150d08e0d3SAndi Kleen 40161da177e4SLinus Torvalds /** 40171da177e4SLinus Torvalds * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 40181da177e4SLinus Torvalds * @size: allocation size 40191da177e4SLinus Torvalds * 40201da177e4SLinus Torvalds * Allocate enough 32bit PA addressable pages to cover @size from the 40211da177e4SLinus Torvalds * page level allocator and map them into contiguous kernel virtual space. 4022a862f68aSMike Rapoport * 4023a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 40241da177e4SLinus Torvalds */ 40251da177e4SLinus Torvalds void *vmalloc_32(unsigned long size) 40261da177e4SLinus Torvalds { 4027f38fcb9cSChristoph Hellwig return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE, 4028f38fcb9cSChristoph Hellwig __builtin_return_address(0)); 40291da177e4SLinus Torvalds } 40301da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_32); 40311da177e4SLinus Torvalds 403283342314SNick Piggin /** 4033ead04089SRolf Eike Beer * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 403483342314SNick Piggin * @size: allocation size 4035ead04089SRolf Eike Beer * 4036ead04089SRolf Eike Beer * The resulting memory area is 32bit addressable and zeroed so it can be 4037ead04089SRolf Eike Beer * mapped to userspace without leaking data. 
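 *
 * A minimal sketch (hypothetical driver mmap path, assuming the buffer
 * is subsequently inserted with remap_vmalloc_range()):
 *
 *	void *buf = vmalloc_32_user(vma->vm_end - vma->vm_start);
 *
 *	if (buf && remap_vmalloc_range(vma, buf, 0))
 *		vfree(buf);	/* remap failed, unwind */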
4038a862f68aSMike Rapoport * 4039a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 404083342314SNick Piggin */ 404183342314SNick Piggin void *vmalloc_32_user(unsigned long size) 404283342314SNick Piggin { 4043bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 4044bc84c535SRoman Penyaev GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 4045bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE, 40465a82ac71SRoman Penyaev __builtin_return_address(0)); 404783342314SNick Piggin } 404883342314SNick Piggin EXPORT_SYMBOL(vmalloc_32_user); 404983342314SNick Piggin 4050d0107eb0SKAMEZAWA Hiroyuki /* 40514c91c07cSLorenzo Stoakes * Atomically zero bytes in the iterator. 40524c91c07cSLorenzo Stoakes * 40534c91c07cSLorenzo Stoakes * Returns the number of zeroed bytes. 4054d0107eb0SKAMEZAWA Hiroyuki */ 40554c91c07cSLorenzo Stoakes static size_t zero_iter(struct iov_iter *iter, size_t count) 4056d0107eb0SKAMEZAWA Hiroyuki { 40574c91c07cSLorenzo Stoakes size_t remains = count; 4058d0107eb0SKAMEZAWA Hiroyuki 40594c91c07cSLorenzo Stoakes while (remains > 0) { 40604c91c07cSLorenzo Stoakes size_t num, copied; 40614c91c07cSLorenzo Stoakes 40620e4bc271SLu Hongfei num = min_t(size_t, remains, PAGE_SIZE); 40634c91c07cSLorenzo Stoakes copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter); 40644c91c07cSLorenzo Stoakes remains -= copied; 40654c91c07cSLorenzo Stoakes 40664c91c07cSLorenzo Stoakes if (copied < num) 40674c91c07cSLorenzo Stoakes break; 40684c91c07cSLorenzo Stoakes } 40694c91c07cSLorenzo Stoakes 40704c91c07cSLorenzo Stoakes return count - remains; 40714c91c07cSLorenzo Stoakes } 40724c91c07cSLorenzo Stoakes 40734c91c07cSLorenzo Stoakes /* 40744c91c07cSLorenzo Stoakes * small helper routine, copy contents to iter from addr. 40754c91c07cSLorenzo Stoakes * If the page is not present, fill zero. 40764c91c07cSLorenzo Stoakes * 40774c91c07cSLorenzo Stoakes * Returns the number of copied bytes. 40784c91c07cSLorenzo Stoakes */ 40794c91c07cSLorenzo Stoakes static size_t aligned_vread_iter(struct iov_iter *iter, 40804c91c07cSLorenzo Stoakes const char *addr, size_t count) 40814c91c07cSLorenzo Stoakes { 40824c91c07cSLorenzo Stoakes size_t remains = count; 40834c91c07cSLorenzo Stoakes struct page *page; 40844c91c07cSLorenzo Stoakes 40854c91c07cSLorenzo Stoakes while (remains > 0) { 4086d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length; 40874c91c07cSLorenzo Stoakes size_t copied = 0; 4088d0107eb0SKAMEZAWA Hiroyuki 4089891c49abSAlexander Kuleshov offset = offset_in_page(addr); 4090d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset; 40914c91c07cSLorenzo Stoakes if (length > remains) 40924c91c07cSLorenzo Stoakes length = remains; 40934c91c07cSLorenzo Stoakes page = vmalloc_to_page(addr); 4094d0107eb0SKAMEZAWA Hiroyuki /* 40954c91c07cSLorenzo Stoakes * To do safe access to this _mapped_ area, we need lock. But 40964c91c07cSLorenzo Stoakes * adding lock here means that we need to add overhead of 40974c91c07cSLorenzo Stoakes * vmalloc()/vfree() calls for this _debug_ interface, rarely 40984c91c07cSLorenzo Stoakes * used. Instead of that, we'll use an local mapping via 40994c91c07cSLorenzo Stoakes * copy_page_to_iter_nofault() and accept a small overhead in 41004c91c07cSLorenzo Stoakes * this access function. 
4101d0107eb0SKAMEZAWA Hiroyuki */ 41024c91c07cSLorenzo Stoakes if (page) 41034c91c07cSLorenzo Stoakes copied = copy_page_to_iter_nofault(page, offset, 41044c91c07cSLorenzo Stoakes length, iter); 41054c91c07cSLorenzo Stoakes else 41064c91c07cSLorenzo Stoakes copied = zero_iter(iter, length); 4107d0107eb0SKAMEZAWA Hiroyuki 41084c91c07cSLorenzo Stoakes addr += copied; 41094c91c07cSLorenzo Stoakes remains -= copied; 41104c91c07cSLorenzo Stoakes 41114c91c07cSLorenzo Stoakes if (copied != length) 41124c91c07cSLorenzo Stoakes break; 4113d0107eb0SKAMEZAWA Hiroyuki } 4114d0107eb0SKAMEZAWA Hiroyuki 41154c91c07cSLorenzo Stoakes return count - remains; 41164c91c07cSLorenzo Stoakes } 41174c91c07cSLorenzo Stoakes 41184c91c07cSLorenzo Stoakes /* 41194c91c07cSLorenzo Stoakes * Read from a vm_map_ram region of memory. 41204c91c07cSLorenzo Stoakes * 41214c91c07cSLorenzo Stoakes * Returns the number of copied bytes. 41224c91c07cSLorenzo Stoakes */ 41234c91c07cSLorenzo Stoakes static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr, 41244c91c07cSLorenzo Stoakes size_t count, unsigned long flags) 412506c89946SBaoquan He { 412606c89946SBaoquan He char *start; 412706c89946SBaoquan He struct vmap_block *vb; 4128062eacf5SUladzislau Rezki (Sony) struct xarray *xa; 412906c89946SBaoquan He unsigned long offset; 41304c91c07cSLorenzo Stoakes unsigned int rs, re; 41314c91c07cSLorenzo Stoakes size_t remains, n; 413206c89946SBaoquan He 413306c89946SBaoquan He /* 413406c89946SBaoquan He * If it's area created by vm_map_ram() interface directly, but 413506c89946SBaoquan He * not further subdividing and delegating management to vmap_block, 413606c89946SBaoquan He * handle it here. 413706c89946SBaoquan He */ 41384c91c07cSLorenzo Stoakes if (!(flags & VMAP_BLOCK)) 41394c91c07cSLorenzo Stoakes return aligned_vread_iter(iter, addr, count); 41404c91c07cSLorenzo Stoakes 41414c91c07cSLorenzo Stoakes remains = count; 414206c89946SBaoquan He 414306c89946SBaoquan He /* 414406c89946SBaoquan He * Area is split into regions and tracked with vmap_block, read out 414506c89946SBaoquan He * each region and zero fill the hole between regions. 
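 * The vmap_block's used_map bitmap records which 4 KiB chunks have been
 * handed out; anything outside the set bitranges reads back as zeroes.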
414606c89946SBaoquan He */ 4147fa1c77c1SUladzislau Rezki (Sony) xa = addr_to_vb_xa((unsigned long) addr); 4148062eacf5SUladzislau Rezki (Sony) vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr)); 414906c89946SBaoquan He if (!vb) 41504c91c07cSLorenzo Stoakes goto finished_zero; 415106c89946SBaoquan He 415206c89946SBaoquan He spin_lock(&vb->lock); 415306c89946SBaoquan He if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) { 415406c89946SBaoquan He spin_unlock(&vb->lock); 41554c91c07cSLorenzo Stoakes goto finished_zero; 41564c91c07cSLorenzo Stoakes } 41574c91c07cSLorenzo Stoakes 41584c91c07cSLorenzo Stoakes for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) { 41594c91c07cSLorenzo Stoakes size_t copied; 41604c91c07cSLorenzo Stoakes 41614c91c07cSLorenzo Stoakes if (remains == 0) 41624c91c07cSLorenzo Stoakes goto finished; 41634c91c07cSLorenzo Stoakes 41644c91c07cSLorenzo Stoakes start = vmap_block_vaddr(vb->va->va_start, rs); 41654c91c07cSLorenzo Stoakes 41664c91c07cSLorenzo Stoakes if (addr < start) { 41674c91c07cSLorenzo Stoakes size_t to_zero = min_t(size_t, start - addr, remains); 41684c91c07cSLorenzo Stoakes size_t zeroed = zero_iter(iter, to_zero); 41694c91c07cSLorenzo Stoakes 41704c91c07cSLorenzo Stoakes addr += zeroed; 41714c91c07cSLorenzo Stoakes remains -= zeroed; 41724c91c07cSLorenzo Stoakes 41734c91c07cSLorenzo Stoakes if (remains == 0 || zeroed != to_zero) 417406c89946SBaoquan He goto finished; 417506c89946SBaoquan He } 41764c91c07cSLorenzo Stoakes 417706c89946SBaoquan He /*it could start reading from the middle of used region*/ 417806c89946SBaoquan He offset = offset_in_page(addr); 417906c89946SBaoquan He n = ((re - rs + 1) << PAGE_SHIFT) - offset; 41804c91c07cSLorenzo Stoakes if (n > remains) 41814c91c07cSLorenzo Stoakes n = remains; 418206c89946SBaoquan He 41834c91c07cSLorenzo Stoakes copied = aligned_vread_iter(iter, start + offset, n); 41844c91c07cSLorenzo Stoakes 41854c91c07cSLorenzo Stoakes addr += copied; 41864c91c07cSLorenzo Stoakes remains -= copied; 41874c91c07cSLorenzo Stoakes 41884c91c07cSLorenzo Stoakes if (copied != n) 41894c91c07cSLorenzo Stoakes goto finished; 419006c89946SBaoquan He } 41914c91c07cSLorenzo Stoakes 419206c89946SBaoquan He spin_unlock(&vb->lock); 419306c89946SBaoquan He 41944c91c07cSLorenzo Stoakes finished_zero: 419506c89946SBaoquan He /* zero-fill the left dirty or free regions */ 41964c91c07cSLorenzo Stoakes return count - remains + zero_iter(iter, remains); 41974c91c07cSLorenzo Stoakes finished: 41984c91c07cSLorenzo Stoakes /* We couldn't copy/zero everything */ 41994c91c07cSLorenzo Stoakes spin_unlock(&vb->lock); 42004c91c07cSLorenzo Stoakes return count - remains; 420106c89946SBaoquan He } 420206c89946SBaoquan He 4203d0107eb0SKAMEZAWA Hiroyuki /** 42044c91c07cSLorenzo Stoakes * vread_iter() - read vmalloc area in a safe way to an iterator. 42054c91c07cSLorenzo Stoakes * @iter: the iterator to which data should be written. 4206d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address. 4207d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be read. 4208d0107eb0SKAMEZAWA Hiroyuki * 4209d0107eb0SKAMEZAWA Hiroyuki * This function checks that addr is a valid vmalloc'ed area, and 4210d0107eb0SKAMEZAWA Hiroyuki * copy data from that area to a given buffer. If the given memory range 4211d0107eb0SKAMEZAWA Hiroyuki * of [addr...addr+count) includes some valid address, data is copied to 4212d0107eb0SKAMEZAWA Hiroyuki * proper area of @buf. If there are memory holes, they'll be zero-filled. 
4213d0107eb0SKAMEZAWA Hiroyuki * An IOREMAP area is treated as a memory hole and no copy is done. 4214d0107eb0SKAMEZAWA Hiroyuki * 4215d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't include any intersection with a live 4216a8e5202dSCong Wang * vm_struct area, returns 0; data is written through @iter. 4217d0107eb0SKAMEZAWA Hiroyuki * 4218d0107eb0SKAMEZAWA Hiroyuki * Note: In usual ops, vread_iter() is never necessary because the caller 4219d0107eb0SKAMEZAWA Hiroyuki * should know the vmalloc() area is valid and can use memcpy(). 4220d0107eb0SKAMEZAWA Hiroyuki * This is for routines which have to access the vmalloc area without 4221bbcd53c9SDavid Hildenbrand * any information, such as /proc/kcore. 4222a862f68aSMike Rapoport * 4223a862f68aSMike Rapoport * Return: number of bytes for which @addr and @iter should be advanced 4224a862f68aSMike Rapoport * (same number as @count) or %0 if [addr...addr+count) doesn't 4225a862f68aSMike Rapoport * include any intersection with a valid vmalloc area 4226d0107eb0SKAMEZAWA Hiroyuki */ 42274c91c07cSLorenzo Stoakes long vread_iter(struct iov_iter *iter, const char *addr, size_t count) 42281da177e4SLinus Torvalds { 4229d0936029SUladzislau Rezki (Sony) struct vmap_node *vn; 4230e81ce85fSJoonsoo Kim struct vmap_area *va; 4231e81ce85fSJoonsoo Kim struct vm_struct *vm; 42324c91c07cSLorenzo Stoakes char *vaddr; 42334c91c07cSLorenzo Stoakes size_t n, size, flags, remains; 423453becf32SUladzislau Rezki (Sony) unsigned long next; 42351da177e4SLinus Torvalds 42364aff1dc4SAndrey Konovalov addr = kasan_reset_tag(addr); 42374aff1dc4SAndrey Konovalov 42381da177e4SLinus Torvalds /* Don't allow overflow */ 42391da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 42401da177e4SLinus Torvalds count = -(unsigned long) addr; 42411da177e4SLinus Torvalds 42424c91c07cSLorenzo Stoakes remains = count; 42434c91c07cSLorenzo Stoakes 424453becf32SUladzislau Rezki (Sony) vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va); 424553becf32SUladzislau Rezki (Sony) if (!vn) 42464c91c07cSLorenzo Stoakes goto finished_zero; 4247f181234aSChen Wandun 4248f181234aSChen Wandun /* no intersection with a live vmap_area */ 42494c91c07cSLorenzo Stoakes if ((unsigned long)addr + remains <= va->va_start) 42504c91c07cSLorenzo Stoakes goto finished_zero; 4251f181234aSChen Wandun 425253becf32SUladzislau Rezki (Sony) do { 42534c91c07cSLorenzo Stoakes size_t copied; 42544c91c07cSLorenzo Stoakes 42554c91c07cSLorenzo Stoakes if (remains == 0) 42564c91c07cSLorenzo Stoakes goto finished; 4257e81ce85fSJoonsoo Kim 425806c89946SBaoquan He vm = va->vm; 425906c89946SBaoquan He flags = va->flags & VMAP_FLAGS_MASK; 426006c89946SBaoquan He /* 426106c89946SBaoquan He * VMAP_BLOCK indicates a sub-type of vm_map_ram area that needs to 426206c89946SBaoquan He * be set together with VMAP_RAM. 426306c89946SBaoquan He */ 426406c89946SBaoquan He WARN_ON(flags == VMAP_BLOCK); 426506c89946SBaoquan He 426606c89946SBaoquan He if (!vm && !flags) 426753becf32SUladzislau Rezki (Sony) goto next_va; 4268e81ce85fSJoonsoo Kim 426930a7a9b1SBaoquan He if (vm && (vm->flags & VM_UNINITIALIZED)) 427053becf32SUladzislau Rezki (Sony) goto next_va; 42714c91c07cSLorenzo Stoakes 427230a7a9b1SBaoquan He /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ 427330a7a9b1SBaoquan He smp_rmb(); 427430a7a9b1SBaoquan He 427506c89946SBaoquan He vaddr = (char *) va->va_start; 427606c89946SBaoquan He size = vm ?
get_vm_area_size(vm) : va_size(va); 427706c89946SBaoquan He 427806c89946SBaoquan He if (addr >= vaddr + size) 427953becf32SUladzislau Rezki (Sony) goto next_va; 42804c91c07cSLorenzo Stoakes 42814c91c07cSLorenzo Stoakes if (addr < vaddr) { 42824c91c07cSLorenzo Stoakes size_t to_zero = min_t(size_t, vaddr - addr, remains); 42834c91c07cSLorenzo Stoakes size_t zeroed = zero_iter(iter, to_zero); 42844c91c07cSLorenzo Stoakes 42854c91c07cSLorenzo Stoakes addr += zeroed; 42864c91c07cSLorenzo Stoakes remains -= zeroed; 42874c91c07cSLorenzo Stoakes 42884c91c07cSLorenzo Stoakes if (remains == 0 || zeroed != to_zero) 42891da177e4SLinus Torvalds goto finished; 42901da177e4SLinus Torvalds } 42914c91c07cSLorenzo Stoakes 429206c89946SBaoquan He n = vaddr + size - addr; 42934c91c07cSLorenzo Stoakes if (n > remains) 42944c91c07cSLorenzo Stoakes n = remains; 429506c89946SBaoquan He 429606c89946SBaoquan He if (flags & VMAP_RAM) 42974c91c07cSLorenzo Stoakes copied = vmap_ram_vread_iter(iter, addr, n, flags); 4298e6f79822SAlexei Starovoitov else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE)))) 42994c91c07cSLorenzo Stoakes copied = aligned_vread_iter(iter, addr, n); 4300e6f79822SAlexei Starovoitov else /* IOREMAP | SPARSE area is treated as memory hole */ 43014c91c07cSLorenzo Stoakes copied = zero_iter(iter, n); 43024c91c07cSLorenzo Stoakes 43034c91c07cSLorenzo Stoakes addr += copied; 43044c91c07cSLorenzo Stoakes remains -= copied; 43054c91c07cSLorenzo Stoakes 43064c91c07cSLorenzo Stoakes if (copied != n) 43074c91c07cSLorenzo Stoakes goto finished; 430853becf32SUladzislau Rezki (Sony) 430953becf32SUladzislau Rezki (Sony) next_va: 431053becf32SUladzislau Rezki (Sony) next = va->va_end; 431153becf32SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 431253becf32SUladzislau Rezki (Sony) } while ((vn = find_vmap_area_exceed_addr_lock(next, &va))); 43134c91c07cSLorenzo Stoakes 43144c91c07cSLorenzo Stoakes finished_zero: 431553becf32SUladzislau Rezki (Sony) if (vn) 4316d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 431753becf32SUladzislau Rezki (Sony) 43184c91c07cSLorenzo Stoakes /* zero-fill memory holes */ 43194c91c07cSLorenzo Stoakes return count - remains + zero_iter(iter, remains); 43201da177e4SLinus Torvalds finished: 43214c91c07cSLorenzo Stoakes /* Nothing remains, or we couldn't copy/zero everything. */ 432253becf32SUladzislau Rezki (Sony) if (vn) 4323d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 4324d0107eb0SKAMEZAWA Hiroyuki 43254c91c07cSLorenzo Stoakes return count - remains; 43261da177e4SLinus Torvalds } 43271da177e4SLinus Torvalds
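/*
 * A minimal usage sketch (hypothetical helper, not part of this file): how a
 * kcore-style reader can drive vread_iter() into a plain kernel buffer. The
 * kvec/iov_iter setup mirrors what callers such as /proc/kcore do before
 * calling in here; "example_read_vmalloc" and "dst" are illustrative names.
 */
static long example_read_vmalloc(void *dst, const char *vmalloc_addr, size_t len)
{
	struct kvec kv = { .iov_base = dst, .iov_len = len };
	struct iov_iter iter;

	/* ITER_DEST: the iterator is the destination the data is copied to. */
	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);

	/* Returns how many bytes were copied or zero-filled into @dst. */
	return vread_iter(&iter, vmalloc_addr, len);
}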
4328d0107eb0SKAMEZAWA Hiroyuki /** 4329e69e9d4aSHATAYAMA Daisuke * remap_vmalloc_range_partial - map vmalloc pages to userspace 4330e69e9d4aSHATAYAMA Daisuke * @vma: vma to cover 4331e69e9d4aSHATAYAMA Daisuke * @uaddr: target user address to start at 4332e69e9d4aSHATAYAMA Daisuke * @kaddr: virtual address of vmalloc kernel memory 4333bdebd6a2SJann Horn * @pgoff: offset from @kaddr to start at 4334e69e9d4aSHATAYAMA Daisuke * @size: size of map area 4335e69e9d4aSHATAYAMA Daisuke * 4336e69e9d4aSHATAYAMA Daisuke * Returns: 0 for success, -Exxx on failure 4337e69e9d4aSHATAYAMA Daisuke * 4338e69e9d4aSHATAYAMA Daisuke * This function checks that @kaddr is a valid vmalloc'ed area, 4339e69e9d4aSHATAYAMA Daisuke * and that it is big enough to cover the range starting at 4340e69e9d4aSHATAYAMA Daisuke * @uaddr in @vma. Will return failure if that criterion isn't 4341e69e9d4aSHATAYAMA Daisuke * met. 4342e69e9d4aSHATAYAMA Daisuke * 4343e69e9d4aSHATAYAMA Daisuke * Similar to remap_pfn_range() (see mm/memory.c) 4344e69e9d4aSHATAYAMA Daisuke */ 4345e69e9d4aSHATAYAMA Daisuke int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, 4346bdebd6a2SJann Horn void *kaddr, unsigned long pgoff, 4347bdebd6a2SJann Horn unsigned long size) 4348e69e9d4aSHATAYAMA Daisuke { 4349e69e9d4aSHATAYAMA Daisuke struct vm_struct *area; 4350bdebd6a2SJann Horn unsigned long off; 4351bdebd6a2SJann Horn unsigned long end_index; 4352bdebd6a2SJann Horn 4353bdebd6a2SJann Horn if (check_shl_overflow(pgoff, PAGE_SHIFT, &off)) 4354bdebd6a2SJann Horn return -EINVAL; 4355e69e9d4aSHATAYAMA Daisuke 4356e69e9d4aSHATAYAMA Daisuke size = PAGE_ALIGN(size); 4357e69e9d4aSHATAYAMA Daisuke 4358e69e9d4aSHATAYAMA Daisuke if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) 4359e69e9d4aSHATAYAMA Daisuke return -EINVAL; 4360e69e9d4aSHATAYAMA Daisuke 4361e69e9d4aSHATAYAMA Daisuke area = find_vm_area(kaddr); 4362e69e9d4aSHATAYAMA Daisuke if (!area) 4363e69e9d4aSHATAYAMA Daisuke return -EINVAL; 4364e69e9d4aSHATAYAMA Daisuke 4365fe9041c2SChristoph Hellwig if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) 4366e69e9d4aSHATAYAMA Daisuke return -EINVAL; 4367e69e9d4aSHATAYAMA Daisuke 4368bdebd6a2SJann Horn if (check_add_overflow(size, off, &end_index) || 4369bdebd6a2SJann Horn end_index > get_vm_area_size(area)) 4370e69e9d4aSHATAYAMA Daisuke return -EINVAL; 4371bdebd6a2SJann Horn kaddr += off; 4372e69e9d4aSHATAYAMA Daisuke 4373e69e9d4aSHATAYAMA Daisuke do { 4374e69e9d4aSHATAYAMA Daisuke struct page *page = vmalloc_to_page(kaddr); 4375e69e9d4aSHATAYAMA Daisuke int ret; 4376e69e9d4aSHATAYAMA Daisuke 4377e69e9d4aSHATAYAMA Daisuke ret = vm_insert_page(vma, uaddr, page); 4378e69e9d4aSHATAYAMA Daisuke if (ret) 4379e69e9d4aSHATAYAMA Daisuke return ret; 4380e69e9d4aSHATAYAMA Daisuke 4381e69e9d4aSHATAYAMA Daisuke uaddr += PAGE_SIZE; 4382e69e9d4aSHATAYAMA Daisuke kaddr += PAGE_SIZE; 4383e69e9d4aSHATAYAMA Daisuke size -= PAGE_SIZE; 4384e69e9d4aSHATAYAMA Daisuke } while (size > 0); 4385e69e9d4aSHATAYAMA Daisuke 43861c71222eSSuren Baghdasaryan vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); 4387e69e9d4aSHATAYAMA Daisuke 4388e69e9d4aSHATAYAMA Daisuke return 0; 4389e69e9d4aSHATAYAMA Daisuke } 4390e69e9d4aSHATAYAMA Daisuke 4391e69e9d4aSHATAYAMA Daisuke /** 439283342314SNick Piggin * remap_vmalloc_range - map vmalloc pages to userspace 439383342314SNick Piggin * @vma: vma to cover (map full range of vma) 439483342314SNick Piggin * @addr: vmalloc memory 439583342314SNick Piggin * @pgoff: number of pages into addr before first page to map 43967682486bSRandy Dunlap * 43977682486bSRandy Dunlap * Returns: 0 for success, -Exxx on failure 439883342314SNick Piggin * 439983342314SNick Piggin * This function checks that addr is a valid vmalloc'ed area, and 440083342314SNick Piggin * that it is big enough to cover the vma. Will return failure if 440183342314SNick Piggin * that criterion isn't met. 440283342314SNick Piggin * 440372fd4a35SRobert P. J. Day * Similar to remap_pfn_range() (see mm/memory.c) 440483342314SNick Piggin */ 440583342314SNick Piggin int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 440683342314SNick Piggin unsigned long pgoff) 440783342314SNick Piggin { 4408e69e9d4aSHATAYAMA Daisuke return remap_vmalloc_range_partial(vma, vma->vm_start, 4409bdebd6a2SJann Horn addr, pgoff, 4410e69e9d4aSHATAYAMA Daisuke vma->vm_end - vma->vm_start); 441183342314SNick Piggin } 441283342314SNick Piggin EXPORT_SYMBOL(remap_vmalloc_range); 441383342314SNick Piggin
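/*
 * A minimal sketch of the classic consumer (hypothetical driver code, not
 * part of this file): exposing a buffer to userspace from a driver's mmap
 * file operation. "foo_buf" is assumed to have been allocated with
 * vmalloc_user(), which sets VM_USERMAP and zeroes the memory so the
 * VM_USERMAP check in remap_vmalloc_range_partial() passes.
 */
static void *foo_buf; /* hypothetical: set up elsewhere via vmalloc_user() */

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, foo_buf, vma->vm_pgoff);
}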
44145f4352fbSJeremy Fitzhardinge void free_vm_area(struct vm_struct *area) 44155f4352fbSJeremy Fitzhardinge { 44165f4352fbSJeremy Fitzhardinge struct vm_struct *ret; 44175f4352fbSJeremy Fitzhardinge ret = remove_vm_area(area->addr); 44185f4352fbSJeremy Fitzhardinge BUG_ON(ret != area); 44195f4352fbSJeremy Fitzhardinge kfree(area); 44205f4352fbSJeremy Fitzhardinge } 44215f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(free_vm_area); 4422a10aa579SChristoph Lameter 44234f8b02b4STejun Heo #ifdef CONFIG_SMP 4424ca23e405STejun Heo static struct vmap_area *node_to_va(struct rb_node *n) 4425ca23e405STejun Heo { 44264583e773SGeliang Tang return rb_entry_safe(n, struct vmap_area, rb_node); 4427ca23e405STejun Heo } 4428ca23e405STejun Heo 4429ca23e405STejun Heo /** 443068ad4a33SUladzislau Rezki (Sony) * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to 443168ad4a33SUladzislau Rezki (Sony) * @addr: target address 4432ca23e405STejun Heo * 443368ad4a33SUladzislau Rezki (Sony) * Returns: the vmap_area if it is found. If there is no such area, 443468ad4a33SUladzislau Rezki (Sony) * the closest (highest, in reverse order) vmap_area below @addr is 443568ad4a33SUladzislau Rezki (Sony) * returned, i.e. va->va_start < addr && va->va_end < addr, or NULL 443668ad4a33SUladzislau Rezki (Sony) * if there are no areas before @addr. 4437ca23e405STejun Heo */ 443868ad4a33SUladzislau Rezki (Sony) static struct vmap_area * 443968ad4a33SUladzislau Rezki (Sony) pvm_find_va_enclose_addr(unsigned long addr) 4440ca23e405STejun Heo { 444168ad4a33SUladzislau Rezki (Sony) struct vmap_area *va, *tmp; 444268ad4a33SUladzislau Rezki (Sony) struct rb_node *n; 444368ad4a33SUladzislau Rezki (Sony) 444468ad4a33SUladzislau Rezki (Sony) n = free_vmap_area_root.rb_node; 444568ad4a33SUladzislau Rezki (Sony) va = NULL; 4446ca23e405STejun Heo 4447ca23e405STejun Heo while (n) { 444868ad4a33SUladzislau Rezki (Sony) tmp = rb_entry(n, struct vmap_area, rb_node); 444968ad4a33SUladzislau Rezki (Sony) if (tmp->va_start <= addr) { 445068ad4a33SUladzislau Rezki (Sony) va = tmp; 445168ad4a33SUladzislau Rezki (Sony) if (tmp->va_end >= addr) 4452ca23e405STejun Heo break; 4453ca23e405STejun Heo 445468ad4a33SUladzislau Rezki (Sony) n = n->rb_right; 4455ca23e405STejun Heo } else { 445668ad4a33SUladzislau Rezki (Sony) n = n->rb_left; 4457ca23e405STejun Heo } 445868ad4a33SUladzislau Rezki (Sony) } 445968ad4a33SUladzislau Rezki (Sony) 446068ad4a33SUladzislau Rezki (Sony) return va; 4461ca23e405STejun Heo } 4462ca23e405STejun Heo 4463ca23e405STejun Heo /** 446468ad4a33SUladzislau Rezki (Sony) * pvm_determine_end_from_reverse - find the highest aligned address 446568ad4a33SUladzislau Rezki (Sony) * of free block below VMALLOC_END 446668ad4a33SUladzislau Rezki (Sony) * @va: 446768ad4a33SUladzislau Rezki (Sony) * in - the VA we start the search (reverse order); 446868ad4a33SUladzislau Rezki (Sony) * out - the VA with the highest aligned end address.
4469799fa85dSAlex Shi * @align: alignment for the required highest address 4470ca23e405STejun Heo * 447168ad4a33SUladzislau Rezki (Sony) * Returns: determined end address within vmap_area 4472ca23e405STejun Heo */ 447368ad4a33SUladzislau Rezki (Sony) static unsigned long 447468ad4a33SUladzislau Rezki (Sony) pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) 4475ca23e405STejun Heo { 447668ad4a33SUladzislau Rezki (Sony) unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 4477ca23e405STejun Heo unsigned long addr; 4478ca23e405STejun Heo 447968ad4a33SUladzislau Rezki (Sony) if (likely(*va)) { 448068ad4a33SUladzislau Rezki (Sony) list_for_each_entry_from_reverse((*va), 448168ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list, list) { 448268ad4a33SUladzislau Rezki (Sony) addr = min((*va)->va_end & ~(align - 1), vmalloc_end); 448368ad4a33SUladzislau Rezki (Sony) if ((*va)->va_start < addr) 448468ad4a33SUladzislau Rezki (Sony) return addr; 448568ad4a33SUladzislau Rezki (Sony) } 4486ca23e405STejun Heo } 4487ca23e405STejun Heo 448868ad4a33SUladzislau Rezki (Sony) return 0; 4489ca23e405STejun Heo } 4490ca23e405STejun Heo 4491ca23e405STejun Heo /** 4492ca23e405STejun Heo * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator 4493ca23e405STejun Heo * @offsets: array containing offset of each area 4494ca23e405STejun Heo * @sizes: array containing size of each area 4495ca23e405STejun Heo * @nr_vms: the number of areas to allocate 4496ca23e405STejun Heo * @align: alignment, all entries in @offsets and @sizes must be aligned to this 4497ca23e405STejun Heo * 4498ca23e405STejun Heo * Returns: kmalloc'd vm_struct pointer array pointing to allocated 4499ca23e405STejun Heo * vm_structs on success, %NULL on failure 4500ca23e405STejun Heo * 4501ca23e405STejun Heo * The percpu allocator wants to use congruent vm areas so that it can 4502ca23e405STejun Heo * maintain the offsets among percpu areas. This function allocates 4503ec3f64fcSDavid Rientjes * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to 4504ec3f64fcSDavid Rientjes * be scattered pretty far, with the distance between two areas easily 4505ec3f64fcSDavid Rientjes * going up to gigabytes. To avoid interacting with regular vmallocs, 4506ec3f64fcSDavid Rientjes * these areas are allocated from the top. 4507ca23e405STejun Heo * 4508ca23e405STejun Heo * Despite its complicated look, this allocator is rather simple. It 450968ad4a33SUladzislau Rezki (Sony) * does everything top-down and scans free blocks from the end looking 451068ad4a33SUladzislau Rezki (Sony) * for a matching base. While scanning, if any of the areas do not fit, 451168ad4a33SUladzislau Rezki (Sony) * the base address is pulled down to fit the area. Scanning is repeated 451268ad4a33SUladzislau Rezki (Sony) * till all the areas fit and then all necessary data structures are 451368ad4a33SUladzislau Rezki (Sony) * inserted and the result is returned.
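 *
 * For example, with nr_vms == 2, offsets[] == { 0, 4 * PAGE_SIZE },
 * sizes[] == { PAGE_SIZE, PAGE_SIZE } and align == PAGE_SIZE, the two
 * returned areas satisfy vms[1]->addr - vms[0]->addr == 4 * PAGE_SIZE
 * for whatever common base the scan settles on.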
4514ca23e405STejun Heo */ 4515ca23e405STejun Heo struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 4516ca23e405STejun Heo const size_t *sizes, int nr_vms, 4517ec3f64fcSDavid Rientjes size_t align) 4518ca23e405STejun Heo { 4519ca23e405STejun Heo const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); 4520ca23e405STejun Heo const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 452168ad4a33SUladzislau Rezki (Sony) struct vmap_area **vas, *va; 4522ca23e405STejun Heo struct vm_struct **vms; 4523ca23e405STejun Heo int area, area2, last_area, term_area; 4524253a496dSDaniel Axtens unsigned long base, start, size, end, last_end, orig_start, orig_end; 4525ca23e405STejun Heo bool purged = false; 4526ca23e405STejun Heo 4527ca23e405STejun Heo /* verify parameters and allocate data structures */ 4528891c49abSAlexander Kuleshov BUG_ON(offset_in_page(align) || !is_power_of_2(align)); 4529ca23e405STejun Heo for (last_area = 0, area = 0; area < nr_vms; area++) { 4530ca23e405STejun Heo start = offsets[area]; 4531ca23e405STejun Heo end = start + sizes[area]; 4532ca23e405STejun Heo 4533ca23e405STejun Heo /* is everything aligned properly? */ 4534ca23e405STejun Heo BUG_ON(!IS_ALIGNED(offsets[area], align)); 4535ca23e405STejun Heo BUG_ON(!IS_ALIGNED(sizes[area], align)); 4536ca23e405STejun Heo 4537ca23e405STejun Heo /* detect the area with the highest address */ 4538ca23e405STejun Heo if (start > offsets[last_area]) 4539ca23e405STejun Heo last_area = area; 4540ca23e405STejun Heo 4541c568da28SWei Yang for (area2 = area + 1; area2 < nr_vms; area2++) { 4542ca23e405STejun Heo unsigned long start2 = offsets[area2]; 4543ca23e405STejun Heo unsigned long end2 = start2 + sizes[area2]; 4544ca23e405STejun Heo 4545c568da28SWei Yang BUG_ON(start2 < end && start < end2); 4546ca23e405STejun Heo } 4547ca23e405STejun Heo } 4548ca23e405STejun Heo last_end = offsets[last_area] + sizes[last_area]; 4549ca23e405STejun Heo 4550ca23e405STejun Heo if (vmalloc_end - vmalloc_start < last_end) { 4551ca23e405STejun Heo WARN_ON(true); 4552ca23e405STejun Heo return NULL; 4553ca23e405STejun Heo } 4554ca23e405STejun Heo 45554d67d860SThomas Meyer vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); 45564d67d860SThomas Meyer vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); 4557ca23e405STejun Heo if (!vas || !vms) 4558f1db7afdSKautuk Consul goto err_free2; 4559ca23e405STejun Heo 4560ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 456168ad4a33SUladzislau Rezki (Sony) vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); 4562ec3f64fcSDavid Rientjes vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); 4563ca23e405STejun Heo if (!vas[area] || !vms[area]) 4564ca23e405STejun Heo goto err_free; 4565ca23e405STejun Heo } 4566ca23e405STejun Heo retry: 4567e36176beSUladzislau Rezki (Sony) spin_lock(&free_vmap_area_lock); 4568ca23e405STejun Heo 4569ca23e405STejun Heo /* start scanning - we scan from the top, begin with the last area */ 4570ca23e405STejun Heo area = term_area = last_area; 4571ca23e405STejun Heo start = offsets[area]; 4572ca23e405STejun Heo end = start + sizes[area]; 4573ca23e405STejun Heo 457468ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(vmalloc_end); 457568ad4a33SUladzislau Rezki (Sony) base = pvm_determine_end_from_reverse(&va, align) - end; 4576ca23e405STejun Heo 4577ca23e405STejun Heo while (true) { 4578ca23e405STejun Heo /* 4579ca23e405STejun Heo * base might have underflowed, add last_end before 4580ca23e405STejun Heo * comparing. 
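 * If base underflowed (wrapped past zero to a huge value), base +
 * last_end wraps back around to a small number and the check below
 * trips; the same test also catches a plain base < vmalloc_start.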
4581ca23e405STejun Heo */ 458268ad4a33SUladzislau Rezki (Sony) if (base + last_end < vmalloc_start + last_end) 458368ad4a33SUladzislau Rezki (Sony) goto overflow; 4584ca23e405STejun Heo 4585ca23e405STejun Heo /* 458668ad4a33SUladzislau Rezki (Sony) * Fitting base has not been found. 4587ca23e405STejun Heo */ 458868ad4a33SUladzislau Rezki (Sony) if (va == NULL) 458968ad4a33SUladzislau Rezki (Sony) goto overflow; 4590ca23e405STejun Heo 4591ca23e405STejun Heo /* 4592d8cc323dSQiujun Huang * If required width exceeds current VA block, move 45935336e52cSKuppuswamy Sathyanarayanan * base downwards and then recheck. 45945336e52cSKuppuswamy Sathyanarayanan */ 45955336e52cSKuppuswamy Sathyanarayanan if (base + end > va->va_end) { 45965336e52cSKuppuswamy Sathyanarayanan base = pvm_determine_end_from_reverse(&va, align) - end; 45975336e52cSKuppuswamy Sathyanarayanan term_area = area; 45985336e52cSKuppuswamy Sathyanarayanan continue; 45995336e52cSKuppuswamy Sathyanarayanan } 46005336e52cSKuppuswamy Sathyanarayanan 46015336e52cSKuppuswamy Sathyanarayanan /* 460268ad4a33SUladzislau Rezki (Sony) * If this VA does not fit, move base downwards and recheck. 4603ca23e405STejun Heo */ 46045336e52cSKuppuswamy Sathyanarayanan if (base + start < va->va_start) { 460568ad4a33SUladzislau Rezki (Sony) va = node_to_va(rb_prev(&va->rb_node)); 460668ad4a33SUladzislau Rezki (Sony) base = pvm_determine_end_from_reverse(&va, align) - end; 4607ca23e405STejun Heo term_area = area; 4608ca23e405STejun Heo continue; 4609ca23e405STejun Heo } 4610ca23e405STejun Heo 4611ca23e405STejun Heo /* 4612ca23e405STejun Heo * This area fits, move on to the previous one. If 4613ca23e405STejun Heo * the previous one is the terminal one, we're done. 4614ca23e405STejun Heo */ 4615ca23e405STejun Heo area = (area + nr_vms - 1) % nr_vms; 4616ca23e405STejun Heo if (area == term_area) 4617ca23e405STejun Heo break; 461868ad4a33SUladzislau Rezki (Sony) 4619ca23e405STejun Heo start = offsets[area]; 4620ca23e405STejun Heo end = start + sizes[area]; 462168ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(base + end); 4622ca23e405STejun Heo } 462368ad4a33SUladzislau Rezki (Sony) 4624ca23e405STejun Heo /* we've found a fitting base, insert all va's */ 4625ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 462668ad4a33SUladzislau Rezki (Sony) int ret; 4627ca23e405STejun Heo 462868ad4a33SUladzislau Rezki (Sony) start = base + offsets[area]; 462968ad4a33SUladzislau Rezki (Sony) size = sizes[area]; 463068ad4a33SUladzislau Rezki (Sony) 463168ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(start); 463268ad4a33SUladzislau Rezki (Sony) if (WARN_ON_ONCE(va == NULL)) 463368ad4a33SUladzislau Rezki (Sony) /* It is a BUG(), but trigger recovery instead. */ 463468ad4a33SUladzislau Rezki (Sony) goto recovery; 463568ad4a33SUladzislau Rezki (Sony) 46365b75b8e1SUladzislau Rezki (Sony) ret = va_clip(&free_vmap_area_root, 46375b75b8e1SUladzislau Rezki (Sony) &free_vmap_area_list, va, start, size); 46381b23ff80SBaoquan He if (WARN_ON_ONCE(unlikely(ret))) 463968ad4a33SUladzislau Rezki (Sony) /* It is a BUG(), but trigger recovery instead. */ 464068ad4a33SUladzislau Rezki (Sony) goto recovery; 464168ad4a33SUladzislau Rezki (Sony) 464268ad4a33SUladzislau Rezki (Sony) /* Allocated area. 
*/ 464368ad4a33SUladzislau Rezki (Sony) va = vas[area]; 464468ad4a33SUladzislau Rezki (Sony) va->va_start = start; 464568ad4a33SUladzislau Rezki (Sony) va->va_end = start + size; 4646ca23e405STejun Heo } 4647ca23e405STejun Heo 4648e36176beSUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 4649ca23e405STejun Heo 4650253a496dSDaniel Axtens /* populate the kasan shadow space */ 4651253a496dSDaniel Axtens for (area = 0; area < nr_vms; area++) { 4652253a496dSDaniel Axtens if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) 4653253a496dSDaniel Axtens goto err_free_shadow; 4654253a496dSDaniel Axtens } 4655253a496dSDaniel Axtens 4656ca23e405STejun Heo /* insert all vm's */ 4657e36176beSUladzislau Rezki (Sony) for (area = 0; area < nr_vms; area++) { 4658d0936029SUladzislau Rezki (Sony) struct vmap_node *vn = addr_to_node(vas[area]->va_start); 4659e36176beSUladzislau Rezki (Sony) 4660d0936029SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock); 4661d0936029SUladzislau Rezki (Sony) insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head); 4662e36176beSUladzislau Rezki (Sony) setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC, 4663ca23e405STejun Heo pcpu_get_vm_areas); 4664d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 4665e36176beSUladzislau Rezki (Sony) } 4666ca23e405STejun Heo 466719f1c3acSAndrey Konovalov /* 466819f1c3acSAndrey Konovalov * Mark allocated areas as accessible. Do it now as a best-effort 466919f1c3acSAndrey Konovalov * approach, as they can be mapped outside of vmalloc code. 467023689e91SAndrey Konovalov * With hardware tag-based KASAN, marking is skipped for 467123689e91SAndrey Konovalov * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 467219f1c3acSAndrey Konovalov */ 46731d96320fSAndrey Konovalov for (area = 0; area < nr_vms; area++) 46741d96320fSAndrey Konovalov vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr, 4675f6e39794SAndrey Konovalov vms[area]->size, KASAN_VMALLOC_PROT_NORMAL); 46761d96320fSAndrey Konovalov 4677ca23e405STejun Heo kfree(vas); 4678ca23e405STejun Heo return vms; 4679ca23e405STejun Heo 468068ad4a33SUladzislau Rezki (Sony) recovery: 4681e36176beSUladzislau Rezki (Sony) /* 4682e36176beSUladzislau Rezki (Sony) * Remove previously allocated areas. There is no 4683e36176beSUladzislau Rezki (Sony) * need to remove these areas from the busy tree, 4684e36176beSUladzislau Rezki (Sony) * because they are inserted only on the final step 4685e36176beSUladzislau Rezki (Sony) * and only when pcpu_get_vm_areas() succeeds.
4686e36176beSUladzislau Rezki (Sony) */ 468768ad4a33SUladzislau Rezki (Sony) while (area--) { 4688253a496dSDaniel Axtens orig_start = vas[area]->va_start; 4689253a496dSDaniel Axtens orig_end = vas[area]->va_end; 469096e2db45SUladzislau Rezki (Sony) va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, 46913c5c3cfbSDaniel Axtens &free_vmap_area_list); 46929c801f61SUladzislau Rezki (Sony) if (va) 4693253a496dSDaniel Axtens kasan_release_vmalloc(orig_start, orig_end, 4694253a496dSDaniel Axtens va->va_start, va->va_end); 469568ad4a33SUladzislau Rezki (Sony) vas[area] = NULL; 469668ad4a33SUladzislau Rezki (Sony) } 469768ad4a33SUladzislau Rezki (Sony) 469868ad4a33SUladzislau Rezki (Sony) overflow: 4699e36176beSUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 470068ad4a33SUladzislau Rezki (Sony) if (!purged) { 470177e50af0SThomas Gleixner reclaim_and_purge_vmap_areas(); 470268ad4a33SUladzislau Rezki (Sony) purged = true; 470368ad4a33SUladzislau Rezki (Sony) 470468ad4a33SUladzislau Rezki (Sony) /* Before "retry", check if we recover. */ 470568ad4a33SUladzislau Rezki (Sony) for (area = 0; area < nr_vms; area++) { 470668ad4a33SUladzislau Rezki (Sony) if (vas[area]) 470768ad4a33SUladzislau Rezki (Sony) continue; 470868ad4a33SUladzislau Rezki (Sony) 470968ad4a33SUladzislau Rezki (Sony) vas[area] = kmem_cache_zalloc( 471068ad4a33SUladzislau Rezki (Sony) vmap_area_cachep, GFP_KERNEL); 471168ad4a33SUladzislau Rezki (Sony) if (!vas[area]) 471268ad4a33SUladzislau Rezki (Sony) goto err_free; 471368ad4a33SUladzislau Rezki (Sony) } 471468ad4a33SUladzislau Rezki (Sony) 471568ad4a33SUladzislau Rezki (Sony) goto retry; 471668ad4a33SUladzislau Rezki (Sony) } 471768ad4a33SUladzislau Rezki (Sony) 4718ca23e405STejun Heo err_free: 4719ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 472068ad4a33SUladzislau Rezki (Sony) if (vas[area]) 472168ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, vas[area]); 472268ad4a33SUladzislau Rezki (Sony) 4723ca23e405STejun Heo kfree(vms[area]); 4724ca23e405STejun Heo } 4725f1db7afdSKautuk Consul err_free2: 4726ca23e405STejun Heo kfree(vas); 4727ca23e405STejun Heo kfree(vms); 4728ca23e405STejun Heo return NULL; 4729253a496dSDaniel Axtens 4730253a496dSDaniel Axtens err_free_shadow: 4731253a496dSDaniel Axtens spin_lock(&free_vmap_area_lock); 4732253a496dSDaniel Axtens /* 4733253a496dSDaniel Axtens * We release all the vmalloc shadows, even the ones for regions that 4734253a496dSDaniel Axtens * hadn't been successfully added. This relies on kasan_release_vmalloc 4735253a496dSDaniel Axtens * being able to tolerate this case. 
4736253a496dSDaniel Axtens */ 4737253a496dSDaniel Axtens for (area = 0; area < nr_vms; area++) { 4738253a496dSDaniel Axtens orig_start = vas[area]->va_start; 4739253a496dSDaniel Axtens orig_end = vas[area]->va_end; 474096e2db45SUladzislau Rezki (Sony) va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, 4741253a496dSDaniel Axtens &free_vmap_area_list); 47429c801f61SUladzislau Rezki (Sony) if (va) 4743253a496dSDaniel Axtens kasan_release_vmalloc(orig_start, orig_end, 4744253a496dSDaniel Axtens va->va_start, va->va_end); 4745253a496dSDaniel Axtens vas[area] = NULL; 4746253a496dSDaniel Axtens kfree(vms[area]); 4747253a496dSDaniel Axtens } 4748253a496dSDaniel Axtens spin_unlock(&free_vmap_area_lock); 4749253a496dSDaniel Axtens kfree(vas); 4750253a496dSDaniel Axtens kfree(vms); 4751253a496dSDaniel Axtens return NULL; 4752ca23e405STejun Heo } 4753ca23e405STejun Heo 4754ca23e405STejun Heo /** 4755ca23e405STejun Heo * pcpu_free_vm_areas - free vmalloc areas for percpu allocator 4756ca23e405STejun Heo * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() 4757ca23e405STejun Heo * @nr_vms: the number of allocated areas 4758ca23e405STejun Heo * 4759ca23e405STejun Heo * Free vm_structs and the array allocated by pcpu_get_vm_areas(). 4760ca23e405STejun Heo */ 4761ca23e405STejun Heo void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) 4762ca23e405STejun Heo { 4763ca23e405STejun Heo int i; 4764ca23e405STejun Heo 4765ca23e405STejun Heo for (i = 0; i < nr_vms; i++) 4766ca23e405STejun Heo free_vm_area(vms[i]); 4767ca23e405STejun Heo kfree(vms); 4768ca23e405STejun Heo } 47694f8b02b4STejun Heo #endif /* CONFIG_SMP */ 4770a10aa579SChristoph Lameter 47715bb1bb35SPaul E. McKenney #ifdef CONFIG_PRINTK 477298f18083SPaul E. McKenney bool vmalloc_dump_obj(void *object) 477398f18083SPaul E. McKenney { 47740818e739SJoel Fernandes (Google) const void *caller; 47750818e739SJoel Fernandes (Google) struct vm_struct *vm; 47760818e739SJoel Fernandes (Google) struct vmap_area *va; 4777d0936029SUladzislau Rezki (Sony) struct vmap_node *vn; 47780818e739SJoel Fernandes (Google) unsigned long addr; 47790818e739SJoel Fernandes (Google) unsigned int nr_pages; 478098f18083SPaul E. McKenney 47818be4d46eSUladzislau Rezki (Sony) addr = PAGE_ALIGN((unsigned long) object); 47828be4d46eSUladzislau Rezki (Sony) vn = addr_to_node(addr); 4783d0936029SUladzislau Rezki (Sony) 47848be4d46eSUladzislau Rezki (Sony) if (!spin_trylock(&vn->busy.lock)) 478598f18083SPaul E. McKenney return false; 4786d0936029SUladzislau Rezki (Sony) 47878be4d46eSUladzislau Rezki (Sony) va = __find_vmap_area(addr, &vn->busy.root); 47888be4d46eSUladzislau Rezki (Sony) if (!va || !va->vm) { 4789d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 47900818e739SJoel Fernandes (Google) return false; 47910818e739SJoel Fernandes (Google) } 47920818e739SJoel Fernandes (Google) 47930818e739SJoel Fernandes (Google) vm = va->vm; 47940818e739SJoel Fernandes (Google) addr = (unsigned long) vm->addr; 47950818e739SJoel Fernandes (Google) caller = vm->caller; 47960818e739SJoel Fernandes (Google) nr_pages = vm->nr_pages; 47978be4d46eSUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 47988be4d46eSUladzislau Rezki (Sony) 4799bd34dcd4SPaul E. McKenney pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n", 48000818e739SJoel Fernandes (Google) nr_pages, addr, caller); 4801d0936029SUladzislau Rezki (Sony) 480298f18083SPaul E. McKenney return true; 480398f18083SPaul E. McKenney } 48045bb1bb35SPaul E. 
McKenney #endif 480598f18083SPaul E. McKenney 4806a10aa579SChristoph Lameter #ifdef CONFIG_PROC_FS 4807a47a126aSEric Dumazet static void show_numa_info(struct seq_file *m, struct vm_struct *v) 4808a47a126aSEric Dumazet { 4809e5adfffcSKirill A. Shutemov if (IS_ENABLED(CONFIG_NUMA)) { 4810a47a126aSEric Dumazet unsigned int nr, *counters = m->private; 481151e50b3aSEric Dumazet unsigned int step = 1U << vm_area_page_order(v); 4812a47a126aSEric Dumazet 4813a47a126aSEric Dumazet if (!counters) 4814a47a126aSEric Dumazet return; 4815a47a126aSEric Dumazet 4816af12346cSWanpeng Li if (v->flags & VM_UNINITIALIZED) 4817af12346cSWanpeng Li return; 48187e5b528bSDmitry Vyukov /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ 48197e5b528bSDmitry Vyukov smp_rmb(); 4820af12346cSWanpeng Li 4821a47a126aSEric Dumazet memset(counters, 0, nr_node_ids * sizeof(unsigned int)); 4822a47a126aSEric Dumazet 482351e50b3aSEric Dumazet for (nr = 0; nr < v->nr_pages; nr += step) 482451e50b3aSEric Dumazet counters[page_to_nid(v->pages[nr])] += step; 4825a47a126aSEric Dumazet for_each_node_state(nr, N_HIGH_MEMORY) 4826a47a126aSEric Dumazet if (counters[nr]) 4827a47a126aSEric Dumazet seq_printf(m, " N%u=%u", nr, counters[nr]); 4828a47a126aSEric Dumazet } 4829a47a126aSEric Dumazet } 4830a47a126aSEric Dumazet 4831dd3b8353SUladzislau Rezki (Sony) static void show_purge_info(struct seq_file *m) 4832dd3b8353SUladzislau Rezki (Sony) { 4833282631cbSUladzislau Rezki (Sony) struct vmap_node *vn; 4834dd3b8353SUladzislau Rezki (Sony) struct vmap_area *va; 4835282631cbSUladzislau Rezki (Sony) int i; 4836dd3b8353SUladzislau Rezki (Sony) 4837282631cbSUladzislau Rezki (Sony) for (i = 0; i < nr_vmap_nodes; i++) { 4838282631cbSUladzislau Rezki (Sony) vn = &vmap_nodes[i]; 4839282631cbSUladzislau Rezki (Sony) 4840282631cbSUladzislau Rezki (Sony) spin_lock(&vn->lazy.lock); 4841282631cbSUladzislau Rezki (Sony) list_for_each_entry(va, &vn->lazy.head, list) { 4842dd3b8353SUladzislau Rezki (Sony) seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n", 4843dd3b8353SUladzislau Rezki (Sony) (void *)va->va_start, (void *)va->va_end, 4844dd3b8353SUladzislau Rezki (Sony) va->va_end - va->va_start); 4845dd3b8353SUladzislau Rezki (Sony) } 4846282631cbSUladzislau Rezki (Sony) spin_unlock(&vn->lazy.lock); 4847282631cbSUladzislau Rezki (Sony) } 4848dd3b8353SUladzislau Rezki (Sony) } 4849dd3b8353SUladzislau Rezki (Sony) 48508e1d743fSUladzislau Rezki (Sony) static int vmalloc_info_show(struct seq_file *m, void *p) 4851a10aa579SChristoph Lameter { 4852d0936029SUladzislau Rezki (Sony) struct vmap_node *vn; 48533f500069Szijun_hu struct vmap_area *va; 4854d4033afdSJoonsoo Kim struct vm_struct *v; 48558e1d743fSUladzislau Rezki (Sony) int i; 4856d4033afdSJoonsoo Kim 48578e1d743fSUladzislau Rezki (Sony) for (i = 0; i < nr_vmap_nodes; i++) { 48588e1d743fSUladzislau Rezki (Sony) vn = &vmap_nodes[i]; 48593f500069Szijun_hu 48608e1d743fSUladzislau Rezki (Sony) spin_lock(&vn->busy.lock); 48618e1d743fSUladzislau Rezki (Sony) list_for_each_entry(va, &vn->busy.head, list) { 4862688fcbfcSPengfei Li if (!va->vm) { 4863bba9697bSBaoquan He if (va->flags & VMAP_RAM) 4864dd3b8353SUladzislau Rezki (Sony) seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n", 486578c72746SYisheng Xie (void *)va->va_start, (void *)va->va_end, 4866dd3b8353SUladzislau Rezki (Sony) va->va_end - va->va_start); 486778c72746SYisheng Xie 48688e1d743fSUladzislau Rezki (Sony) continue; 486978c72746SYisheng Xie } 4870d4033afdSJoonsoo Kim 4871d4033afdSJoonsoo Kim v = va->vm; 4872a10aa579SChristoph Lameter 
487345ec1690SKees Cook seq_printf(m, "0x%pK-0x%pK %7ld", 4874a10aa579SChristoph Lameter v->addr, v->addr + v->size, v->size); 4875a10aa579SChristoph Lameter 487662c70bceSJoe Perches if (v->caller) 487762c70bceSJoe Perches seq_printf(m, " %pS", v->caller); 487823016969SChristoph Lameter 4879a10aa579SChristoph Lameter if (v->nr_pages) 4880a10aa579SChristoph Lameter seq_printf(m, " pages=%d", v->nr_pages); 4881a10aa579SChristoph Lameter 4882a10aa579SChristoph Lameter if (v->phys_addr) 4883199eaa05SMiles Chen seq_printf(m, " phys=%pa", &v->phys_addr); 4884a10aa579SChristoph Lameter 4885a10aa579SChristoph Lameter if (v->flags & VM_IOREMAP) 4886f4527c90SFabian Frederick seq_puts(m, " ioremap"); 4887a10aa579SChristoph Lameter 4888e6f79822SAlexei Starovoitov if (v->flags & VM_SPARSE) 4889e6f79822SAlexei Starovoitov seq_puts(m, " sparse"); 4890e6f79822SAlexei Starovoitov 4891a10aa579SChristoph Lameter if (v->flags & VM_ALLOC) 4892f4527c90SFabian Frederick seq_puts(m, " vmalloc"); 4893a10aa579SChristoph Lameter 4894a10aa579SChristoph Lameter if (v->flags & VM_MAP) 4895f4527c90SFabian Frederick seq_puts(m, " vmap"); 4896a10aa579SChristoph Lameter 4897a10aa579SChristoph Lameter if (v->flags & VM_USERMAP) 4898f4527c90SFabian Frederick seq_puts(m, " user"); 4899a10aa579SChristoph Lameter 4900fe9041c2SChristoph Hellwig if (v->flags & VM_DMA_COHERENT) 4901fe9041c2SChristoph Hellwig seq_puts(m, " dma-coherent"); 4902fe9041c2SChristoph Hellwig 4903244d63eeSDavid Rientjes if (is_vmalloc_addr(v->pages)) 4904f4527c90SFabian Frederick seq_puts(m, " vpages"); 4905a10aa579SChristoph Lameter 4906a47a126aSEric Dumazet show_numa_info(m, v); 4907a10aa579SChristoph Lameter seq_putc(m, '\n'); 49088e1d743fSUladzislau Rezki (Sony) } 49098e1d743fSUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 49108e1d743fSUladzislau Rezki (Sony) } 4911dd3b8353SUladzislau Rezki (Sony) 4912dd3b8353SUladzislau Rezki (Sony) /* 491396e2db45SUladzislau Rezki (Sony) * As a final step, dump "unpurged" areas. 
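 * ("Unpurged" areas are those sitting on the per-node lazy lists:
 * already freed, but their kernel VA range has not yet been
 * reclaimed by the purge path.)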
4914dd3b8353SUladzislau Rezki (Sony) */ 4915dd3b8353SUladzislau Rezki (Sony) show_purge_info(m); 4916a10aa579SChristoph Lameter return 0; 4917a10aa579SChristoph Lameter } 4918a10aa579SChristoph Lameter 49195f6a6a9cSAlexey Dobriyan static int __init proc_vmalloc_init(void) 49205f6a6a9cSAlexey Dobriyan { 49218e1d743fSUladzislau Rezki (Sony) void *priv_data = NULL; 49228e1d743fSUladzislau Rezki (Sony) 4923fddda2b7SChristoph Hellwig if (IS_ENABLED(CONFIG_NUMA)) 49248e1d743fSUladzislau Rezki (Sony) priv_data = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL); 49258e1d743fSUladzislau Rezki (Sony) 49268e1d743fSUladzislau Rezki (Sony) proc_create_single_data("vmallocinfo", 49278e1d743fSUladzislau Rezki (Sony) 0400, NULL, vmalloc_info_show, priv_data); 49288e1d743fSUladzislau Rezki (Sony) 49295f6a6a9cSAlexey Dobriyan return 0; 49305f6a6a9cSAlexey Dobriyan } 49315f6a6a9cSAlexey Dobriyan module_init(proc_vmalloc_init); 4932db3808c1SJoonsoo Kim 4933a10aa579SChristoph Lameter #endif 4934208162f4SChristoph Hellwig 4935d0936029SUladzislau Rezki (Sony) static void __init vmap_init_free_space(void) 49367fa8cee0SUladzislau Rezki (Sony) { 49377fa8cee0SUladzislau Rezki (Sony) unsigned long vmap_start = 1; 49387fa8cee0SUladzislau Rezki (Sony) const unsigned long vmap_end = ULONG_MAX; 4939d0936029SUladzislau Rezki (Sony) struct vmap_area *free; 4940d0936029SUladzislau Rezki (Sony) struct vm_struct *busy; 49417fa8cee0SUladzislau Rezki (Sony) 49427fa8cee0SUladzislau Rezki (Sony) /* 49437fa8cee0SUladzislau Rezki (Sony) * B F B B B F 49447fa8cee0SUladzislau Rezki (Sony) * -|-----|.....|-----|-----|-----|.....|- 49457fa8cee0SUladzislau Rezki (Sony) * | The KVA space | 49467fa8cee0SUladzislau Rezki (Sony) * |<--------------------------------->| 49477fa8cee0SUladzislau Rezki (Sony) */ 4948d0936029SUladzislau Rezki (Sony) for (busy = vmlist; busy; busy = busy->next) { 4949d0936029SUladzislau Rezki (Sony) if ((unsigned long) busy->addr - vmap_start > 0) { 49507fa8cee0SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 49517fa8cee0SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) { 49527fa8cee0SUladzislau Rezki (Sony) free->va_start = vmap_start; 4953d0936029SUladzislau Rezki (Sony) free->va_end = (unsigned long) busy->addr; 49547fa8cee0SUladzislau Rezki (Sony) 49557fa8cee0SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL, 49567fa8cee0SUladzislau Rezki (Sony) &free_vmap_area_root, 49577fa8cee0SUladzislau Rezki (Sony) &free_vmap_area_list); 49587fa8cee0SUladzislau Rezki (Sony) } 49597fa8cee0SUladzislau Rezki (Sony) } 49607fa8cee0SUladzislau Rezki (Sony) 4961d0936029SUladzislau Rezki (Sony) vmap_start = (unsigned long) busy->addr + busy->size; 49627fa8cee0SUladzislau Rezki (Sony) } 49637fa8cee0SUladzislau Rezki (Sony) 49647fa8cee0SUladzislau Rezki (Sony) if (vmap_end - vmap_start > 0) { 49657fa8cee0SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 49667fa8cee0SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) { 49677fa8cee0SUladzislau Rezki (Sony) free->va_start = vmap_start; 49687fa8cee0SUladzislau Rezki (Sony) free->va_end = vmap_end; 49697fa8cee0SUladzislau Rezki (Sony) 49707fa8cee0SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL, 49717fa8cee0SUladzislau Rezki (Sony) &free_vmap_area_root, 49727fa8cee0SUladzislau Rezki (Sony) &free_vmap_area_list); 49737fa8cee0SUladzislau Rezki (Sony) } 49747fa8cee0SUladzislau Rezki (Sony) } 49757fa8cee0SUladzislau Rezki (Sony) } 49767fa8cee0SUladzislau Rezki (Sony) 4977d0936029SUladzislau Rezki 
(Sony) static void vmap_init_nodes(void) 4978d0936029SUladzislau Rezki (Sony) { 4979d0936029SUladzislau Rezki (Sony) struct vmap_node *vn; 49808f33a2ffSUladzislau Rezki (Sony) int i, n; 4981d0936029SUladzislau Rezki (Sony) 49828f33a2ffSUladzislau Rezki (Sony) #if BITS_PER_LONG == 64 498315e02a39SUladzislau Rezki (Sony) /* 498415e02a39SUladzislau Rezki (Sony) * A high threshold of max nodes is fixed and bound to 128, 498515e02a39SUladzislau Rezki (Sony) * thus the scale factor is 1 for systems where the number of cores 498615e02a39SUladzislau Rezki (Sony) * is less than or equal to the specified threshold. 498715e02a39SUladzislau Rezki (Sony) * 498815e02a39SUladzislau Rezki (Sony) * As for NUMA-aware nodes: for bigger systems, for example 498915e02a39SUladzislau Rezki (Sony) * multi-socket NUMA, where we can end up with thousands 499015e02a39SUladzislau Rezki (Sony) * of cores in total, a "sub-numa-clustering" should be added. 499115e02a39SUladzislau Rezki (Sony) * 499215e02a39SUladzislau Rezki (Sony) * In this case a NUMA domain is considered as a single entity 499315e02a39SUladzislau Rezki (Sony) * with dedicated sub-nodes in it which describe one group or 499415e02a39SUladzislau Rezki (Sony) * set of cores. Per-domain purging would then need to be 499515e02a39SUladzislau Rezki (Sony) * added, as well as per-domain balancing. 499615e02a39SUladzislau Rezki (Sony) */ 49978f33a2ffSUladzislau Rezki (Sony) n = clamp_t(unsigned int, num_possible_cpus(), 1, 128); 49988f33a2ffSUladzislau Rezki (Sony) 49998f33a2ffSUladzislau Rezki (Sony) if (n > 1) { 50008f33a2ffSUladzislau Rezki (Sony) vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN); 50018f33a2ffSUladzislau Rezki (Sony) if (vn) { 50028f33a2ffSUladzislau Rezki (Sony) /* Node partition is 16 pages. */ 50038f33a2ffSUladzislau Rezki (Sony) vmap_zone_size = (1 << 4) * PAGE_SIZE; 50048f33a2ffSUladzislau Rezki (Sony) nr_vmap_nodes = n; 50058f33a2ffSUladzislau Rezki (Sony) vmap_nodes = vn; 50068f33a2ffSUladzislau Rezki (Sony) } else { 50078f33a2ffSUladzislau Rezki (Sony) pr_err("Failed to allocate an array.
Disable a node layer\n"); 50088f33a2ffSUladzislau Rezki (Sony) } 50098f33a2ffSUladzislau Rezki (Sony) } 50108f33a2ffSUladzislau Rezki (Sony) #endif 50118f33a2ffSUladzislau Rezki (Sony) 50128f33a2ffSUladzislau Rezki (Sony) for (n = 0; n < nr_vmap_nodes; n++) { 50138f33a2ffSUladzislau Rezki (Sony) vn = &vmap_nodes[n]; 5014d0936029SUladzislau Rezki (Sony) vn->busy.root = RB_ROOT; 5015d0936029SUladzislau Rezki (Sony) INIT_LIST_HEAD(&vn->busy.head); 5016d0936029SUladzislau Rezki (Sony) spin_lock_init(&vn->busy.lock); 5017282631cbSUladzislau Rezki (Sony) 5018282631cbSUladzislau Rezki (Sony) vn->lazy.root = RB_ROOT; 5019282631cbSUladzislau Rezki (Sony) INIT_LIST_HEAD(&vn->lazy.head); 5020282631cbSUladzislau Rezki (Sony) spin_lock_init(&vn->lazy.lock); 502172210662SUladzislau Rezki (Sony) 50228f33a2ffSUladzislau Rezki (Sony) for (i = 0; i < MAX_VA_SIZE_PAGES; i++) { 50238f33a2ffSUladzislau Rezki (Sony) INIT_LIST_HEAD(&vn->pool[i].head); 50248f33a2ffSUladzislau Rezki (Sony) WRITE_ONCE(vn->pool[i].len, 0); 502572210662SUladzislau Rezki (Sony) } 502672210662SUladzislau Rezki (Sony) 502772210662SUladzislau Rezki (Sony) spin_lock_init(&vn->pool_lock); 5028d0936029SUladzislau Rezki (Sony) } 5029d0936029SUladzislau Rezki (Sony) } 5030d0936029SUladzislau Rezki (Sony) 50317679ba6bSUladzislau Rezki (Sony) static unsigned long 50327679ba6bSUladzislau Rezki (Sony) vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc) 50337679ba6bSUladzislau Rezki (Sony) { 50347679ba6bSUladzislau Rezki (Sony) unsigned long count; 50357679ba6bSUladzislau Rezki (Sony) struct vmap_node *vn; 50367679ba6bSUladzislau Rezki (Sony) int i, j; 50377679ba6bSUladzislau Rezki (Sony) 50387679ba6bSUladzislau Rezki (Sony) for (count = 0, i = 0; i < nr_vmap_nodes; i++) { 50397679ba6bSUladzislau Rezki (Sony) vn = &vmap_nodes[i]; 50407679ba6bSUladzislau Rezki (Sony) 50417679ba6bSUladzislau Rezki (Sony) for (j = 0; j < MAX_VA_SIZE_PAGES; j++) 50427679ba6bSUladzislau Rezki (Sony) count += READ_ONCE(vn->pool[j].len); 50437679ba6bSUladzislau Rezki (Sony) } 50447679ba6bSUladzislau Rezki (Sony) 50457679ba6bSUladzislau Rezki (Sony) return count ? count : SHRINK_EMPTY; 50467679ba6bSUladzislau Rezki (Sony) } 50477679ba6bSUladzislau Rezki (Sony) 50487679ba6bSUladzislau Rezki (Sony) static unsigned long 50497679ba6bSUladzislau Rezki (Sony) vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) 50507679ba6bSUladzislau Rezki (Sony) { 50517679ba6bSUladzislau Rezki (Sony) int i; 50527679ba6bSUladzislau Rezki (Sony) 50537679ba6bSUladzislau Rezki (Sony) for (i = 0; i < nr_vmap_nodes; i++) 50547679ba6bSUladzislau Rezki (Sony) decay_va_pool_node(&vmap_nodes[i], true); 50557679ba6bSUladzislau Rezki (Sony) 50567679ba6bSUladzislau Rezki (Sony) return SHRINK_STOP; 50577679ba6bSUladzislau Rezki (Sony) } 50587679ba6bSUladzislau Rezki (Sony) 5059208162f4SChristoph Hellwig void __init vmalloc_init(void) 5060208162f4SChristoph Hellwig { 50617679ba6bSUladzislau Rezki (Sony) struct shrinker *vmap_node_shrinker; 5062208162f4SChristoph Hellwig struct vmap_area *va; 5063d0936029SUladzislau Rezki (Sony) struct vmap_node *vn; 5064208162f4SChristoph Hellwig struct vm_struct *tmp; 5065208162f4SChristoph Hellwig int i; 5066208162f4SChristoph Hellwig 5067208162f4SChristoph Hellwig /* 5068208162f4SChristoph Hellwig * Create the cache for vmap_area objects. 
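 * SLAB_PANIC: vmalloc cannot operate without this cache, so failing
 * to create it is fatal.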
5069208162f4SChristoph Hellwig */ 5070208162f4SChristoph Hellwig vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC); 5071208162f4SChristoph Hellwig 5072208162f4SChristoph Hellwig for_each_possible_cpu(i) { 5073208162f4SChristoph Hellwig struct vmap_block_queue *vbq; 5074208162f4SChristoph Hellwig struct vfree_deferred *p; 5075208162f4SChristoph Hellwig 5076208162f4SChristoph Hellwig vbq = &per_cpu(vmap_block_queue, i); 5077208162f4SChristoph Hellwig spin_lock_init(&vbq->lock); 5078208162f4SChristoph Hellwig INIT_LIST_HEAD(&vbq->free); 5079208162f4SChristoph Hellwig p = &per_cpu(vfree_deferred, i); 5080208162f4SChristoph Hellwig init_llist_head(&p->list); 5081208162f4SChristoph Hellwig INIT_WORK(&p->wq, delayed_vfree_work); 5082062eacf5SUladzislau Rezki (Sony) xa_init(&vbq->vmap_blocks); 5083208162f4SChristoph Hellwig } 5084208162f4SChristoph Hellwig 5085d0936029SUladzislau Rezki (Sony) /* 5086d0936029SUladzislau Rezki (Sony) * Setup nodes before importing vmlist. 5087d0936029SUladzislau Rezki (Sony) */ 5088d0936029SUladzislau Rezki (Sony) vmap_init_nodes(); 5089d0936029SUladzislau Rezki (Sony) 5090208162f4SChristoph Hellwig /* Import existing vmlist entries. */ 5091208162f4SChristoph Hellwig for (tmp = vmlist; tmp; tmp = tmp->next) { 5092208162f4SChristoph Hellwig va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 5093208162f4SChristoph Hellwig if (WARN_ON_ONCE(!va)) 5094208162f4SChristoph Hellwig continue; 5095208162f4SChristoph Hellwig 5096208162f4SChristoph Hellwig va->va_start = (unsigned long)tmp->addr; 5097208162f4SChristoph Hellwig va->va_end = va->va_start + tmp->size; 5098208162f4SChristoph Hellwig va->vm = tmp; 5099d0936029SUladzislau Rezki (Sony) 5100d0936029SUladzislau Rezki (Sony) vn = addr_to_node(va->va_start); 5101d0936029SUladzislau Rezki (Sony) insert_vmap_area(va, &vn->busy.root, &vn->busy.head); 5102208162f4SChristoph Hellwig } 5103208162f4SChristoph Hellwig 5104208162f4SChristoph Hellwig /* 5105208162f4SChristoph Hellwig * Now we can initialize a free vmap space. 5106208162f4SChristoph Hellwig */ 5107208162f4SChristoph Hellwig vmap_init_free_space(); 5108208162f4SChristoph Hellwig vmap_initialized = true; 51097679ba6bSUladzislau Rezki (Sony) 51107679ba6bSUladzislau Rezki (Sony) vmap_node_shrinker = shrinker_alloc(0, "vmap-node"); 51117679ba6bSUladzislau Rezki (Sony) if (!vmap_node_shrinker) { 51127679ba6bSUladzislau Rezki (Sony) pr_err("Failed to allocate vmap-node shrinker!\n"); 51137679ba6bSUladzislau Rezki (Sony) return; 51147679ba6bSUladzislau Rezki (Sony) } 51157679ba6bSUladzislau Rezki (Sony) 51167679ba6bSUladzislau Rezki (Sony) vmap_node_shrinker->count_objects = vmap_node_shrink_count; 51177679ba6bSUladzislau Rezki (Sony) vmap_node_shrinker->scan_objects = vmap_node_shrink_scan; 51187679ba6bSUladzislau Rezki (Sony) shrinker_register(vmap_node_shrinker); 5119208162f4SChristoph Hellwig } 5120