// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/uio.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vmalloc.h>

#include "internal.h"
#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
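
/*
 * "nohugeiomap" is a kernel command-line parameter (documented in
 * Documentation/admin-guide/kernel-parameters.txt). Booting with it
 * caps ioremap_max_page_shift at PAGE_SHIFT, so ioremap_page_range()
 * below only ever installs base-page mappings, even when the
 * architecture could use huge ones. "nohugevmalloc" below is its
 * vmalloc counterpart.
 */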

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	unsigned long size = PAGE_SIZE;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(ptep_get(pte)));

#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
		if (size != PAGE_SIZE) {
			pte_t entry = pfn_pte(pfn, prot);

			entry = arch_make_huge_pte(entry, ilog2(size), 0);
			set_huge_pte_at(&init_mm, addr, pte, entry, size);
			pfn += PFN_DOWN(size);
			continue;
		}
#endif
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte += PFN_DOWN(size), addr += size, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}
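
/*
 * arch_vmap_pte_range_map_size() lets an architecture claim a larger
 * step at the PTE level: powerpc 8xx, for instance, can map 512K
 * pages here. In that case a single huge PTE covers the whole
 * stretch, and the loop above advances addr by 512K and pfn by
 * PFN_DOWN(512K) entries at once instead of page by page.
 */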

static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PMD_SHIFT)
		return 0;

	if (!arch_vmap_pmd_supported(prot))
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PUD_SHIFT)
		return 0;

	if (!arch_vmap_pud_supported(prot))
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}
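
/*
 * The checks above follow a fixed pattern that repeats for each
 * level (PMD and PUD here, P4D below): a huge mapping is attempted
 * only if the caller allows it (max_page_shift), the architecture
 * supports it, and the range is exactly one huge page with both the
 * virtual and the physical address aligned to it. For example, on
 * x86-64 vmap_try_huge_pmd() can only succeed for a 2M request whose
 * addr and phys_addr are both 2M-aligned; everything else falls back
 * to base pages.
 */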

static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < P4D_SHIFT)
		return 0;

	if (!arch_vmap_p4d_supported(prot))
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_range_noflush(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
					max_page_shift, &mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

int ioremap_page_range(unsigned long addr, unsigned long end,
		phys_addr_t phys_addr, pgprot_t prot)
{
	int err;

	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
				 ioremap_max_page_shift);
	flush_cache_vmap(addr, end);
	if (!err)
		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
					       ioremap_max_page_shift);
	return err;
}
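
/*
 * ioremap_page_range() backs the generic ioremap() path: given a
 * chunk of vmalloc address space, it wires [addr, end) to the device
 * region at phys_addr. A simplified sketch of a caller:
 *
 *	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);
 *
 *	ioremap_page_range((unsigned long)area->addr,
 *			   (unsigned long)area->addr + size,
 *			   phys_addr, pgprot_noncached(PAGE_KERNEL));
 *
 * Huge mappings are used transparently up to ioremap_max_page_shift
 * when size and alignment permit.
 */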

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared)
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);

		cond_resched();
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared)
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		p4d_clear_huge(p4d);
		if (p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}

/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vunmap() before calling
 * this function, and flush_tlb_kernel_range() after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
void __vunmap_range_noflush(unsigned long start, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}

void vunmap_range_noflush(unsigned long start, unsigned long end)
{
	kmsan_vunmap_range_noflush(start, end);
	__vunmap_range_noflush(start, end);
}

/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void vunmap_range(unsigned long addr, unsigned long end)
{
	flush_cache_vunmap(addr, end);
	vunmap_range_noflush(addr, end);
	flush_tlb_kernel_range(addr, end);
}
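
/*
 * For example, tearing down a mapping previously set up over
 * [addr, addr + size) is simply:
 *
 *	vunmap_range(addr, addr + size);
 *
 * after which any access to that range faults instead of hitting
 * stale translations, until the range is re-mapped.
 */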

static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(ptep_get(pte))))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		if (WARN_ON(!pfn_valid(page_to_pfn(page))))
			return -EINVAL;

		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return 0;
}

/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

	WARN_ON(page_shift < PAGE_SHIFT);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					page_to_phys(pages[i]), prot,
					page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}

	return 0;
}

int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
						 page_shift);

	if (ret)
		return ret;
	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
}
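
/*
 * The page_shift argument groups the pages array into naturally
 * aligned blocks. For example, with page_shift == PMD_SHIFT on a
 * 4K-page x86-64 kernel, every 512 consecutive, physically
 * contiguous pages are mapped by one vmap_range_noflush() call that
 * installs a single PMD leaf instead of 512 PTEs. With
 * page_shift == PAGE_SHIFT the small-pages path above is taken
 * unchanged.
 */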

/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	flush_cache_vmap(addr, end);
	return err;
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)kasan_reset_tag(x);
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);

/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
		return NULL; /* XXX: no allowance for huge pgd */
	if (WARN_ON_ONCE(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pmd_bad(*pmd)))
		return NULL;

	ptep = pte_offset_kernel(pmd, addr);
	pte = ptep_get(ptep);
	if (pte_present(pte))
		page = pte_page(pte);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
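
/*
 * For example, resolving the backing page and pfn of the i-th page
 * of a vmalloc()'d buffer:
 *
 *	struct page *page = vmalloc_to_page(buf + i * PAGE_SIZE);
 *	unsigned long pfn = vmalloc_to_pfn(buf + i * PAGE_SIZE);
 *
 * Both helpers expect an address within the vmalloc (or module)
 * space; anything else trips the VIRTUAL_BUG_ON() above.
 */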

/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0


static DEFINE_SPINLOCK(free_vmap_area_lock);
static bool vmap_initialized __read_mostly;

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in the "no edge" splitting of
 * a free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used together with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains the maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find the lowest match of a free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for the "no edge" split case. The
 * aim is to get rid of allocations from atomic context, thus to
 * use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

/*
 * This structure defines a single, solid model where a list and
 * an rb-tree are part of one entity protected by the lock. Nodes
 * are sorted in ascending order, so the list provides O(1) access
 * to left/right neighbors as well as sequential traversal.
 */
struct rb_list {
	struct rb_root root;
	struct list_head head;
	spinlock_t lock;
};

/*
 * A fast size storage contains VAs up to 1M in size. A pool
 * consists of ready-to-go VAs of a certain size, linked to one
 * another. An index in the pool array corresponds to the number
 * of pages + 1.
 */
#define MAX_VA_SIZE_PAGES 256

struct vmap_pool {
	struct list_head head;
	unsigned long len;
};

/*
 * An effective vmap-node logic. Users make use of nodes instead
 * of a global heap, which balances access and mitigates lock
 * contention.
 */
static struct vmap_node {
	/* Simple size segregated storage. */
	struct vmap_pool pool[MAX_VA_SIZE_PAGES];
	spinlock_t pool_lock;
	bool skip_populate;

	/* Bookkeeping data of this node. */
	struct rb_list busy;
	struct rb_list lazy;

	/*
	 * Ready-to-free areas.
	 */
	struct list_head purge_list;
	struct work_struct purge_work;
	unsigned long nr_purged;
} single;

/*
 * The initial setup consists of one single node, i.e. balancing
 * is fully disabled. Later on, after vmap is initialized, these
 * parameters are updated based on the system capacity.
 */
static struct vmap_node *vmap_nodes = &single;
static __read_mostly unsigned int nr_vmap_nodes = 1;
static __read_mostly unsigned int vmap_zone_size = 1;

static inline unsigned int
addr_to_node_id(unsigned long addr)
{
	return (addr / vmap_zone_size) % nr_vmap_nodes;
}

static inline struct vmap_node *
addr_to_node(unsigned long addr)
{
	return &vmap_nodes[addr_to_node_id(addr)];
}

static inline struct vmap_node *
id_to_node(unsigned int id)
{
	return &vmap_nodes[id % nr_vmap_nodes];
}
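
/*
 * Example of the node lookup arithmetic: with nr_vmap_nodes == 4 and
 * vmap_zone_size == 4M, the address space is striped in 4M zones and
 *
 *	addr_to_node_id(addr) == (addr / 4M) % 4
 *
 * so an address at the 4M mark lands in node 1, one at the 16M mark
 * wraps back to node 0, and so on. (The values are illustrative;
 * both parameters are recomputed at boot once vmap is initialized,
 * as noted above.)
 */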

/*
 * We use the value 0 to represent "no node", that is why
 * an encoded value will be the node-id incremented by 1.
 * It is always greater than 0. A valid node_id which can
 * be encoded is [0:nr_vmap_nodes - 1]. If a passed node_id
 * is not valid, 0 is returned.
 */
static unsigned int
encode_vn_id(unsigned int node_id)
{
	/* Can store U8_MAX [0:254] nodes. */
	if (node_id < nr_vmap_nodes)
		return (node_id + 1) << BITS_PER_BYTE;

	/* Warn and no node encoded. */
	WARN_ONCE(1, "Encode wrong node id (%u)\n", node_id);
	return 0;
}

/*
 * Returns the decoded node-id; the valid range is within
 * [0:nr_vmap_nodes-1] values. Otherwise, if the extracted
 * data is wrong, nr_vmap_nodes is returned.
 */
static unsigned int
decode_vn_id(unsigned int val)
{
	unsigned int node_id = (val >> BITS_PER_BYTE) - 1;

	/* Can store U8_MAX [0:254] nodes. */
	if (node_id < nr_vmap_nodes)
		return node_id;

	/* If it was _not_ zero, warn. */
	WARN_ONCE(node_id != UINT_MAX,
		"Decode wrong node id (%d)\n", node_id);

	return nr_vmap_nodes;
}

static bool
is_vn_id_valid(unsigned int node_id)
{
	if (node_id < nr_vmap_nodes)
		return true;

	return false;
}
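
/*
 * Round-trip example of the encoding: node_id 3 is stored as
 * (3 + 1) << BITS_PER_BYTE == 0x400, and decode_vn_id(0x400)
 * recovers (0x400 >> BITS_PER_BYTE) - 1 == 3. The all-zero value
 * decodes to UINT_MAX and is reported as "no node" (nr_vmap_nodes)
 * without triggering the warning.
 */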

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void reclaim_and_purge_vmap_areas(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static void drain_vmap_area_work(struct work_struct *work);
static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

/* Look up the first VA which satisfies addr < va_end, NULL if none. */
static struct vmap_area *
__find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
{
	struct vmap_area *va = NULL;
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *tmp;

		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_end > addr) {
			va = tmp;
			if (tmp->va_start <= addr)
				break;

			n = n->rb_left;
		} else
			n = n->rb_right;
	}

	return va;
}

/*
 * Returns the node where the first VA that satisfies addr < va_end
 * resides. On success, the node is locked; the caller is responsible
 * for unlocking it once the VA no longer needs to be accessed.
 *
 * Returns NULL if nothing is found.
 */
static struct vmap_node *
find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
{
	struct vmap_node *vn, *va_node = NULL;
	struct vmap_area *va_lowest;
	int i;

	for (i = 0; i < nr_vmap_nodes; i++) {
		vn = &vmap_nodes[i];

		spin_lock(&vn->busy.lock);
		va_lowest = __find_vmap_area_exceed_addr(addr, &vn->busy.root);
		if (va_lowest) {
			if (!va_node || va_lowest->va_start < (*va)->va_start) {
				if (va_node)
					spin_unlock(&va_node->busy.lock);

				*va = va_lowest;
				va_node = vn;
				continue;
			}
		}
		spin_unlock(&vn->busy.lock);
	}

	return va_node;
}
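
/*
 * A sketch of the usage pattern the locking protocol above implies
 * (the real callers live further down in this file):
 *
 *	struct vmap_area *va;
 *	struct vmap_node *vn;
 *
 *	vn = find_vmap_area_exceed_addr_lock(addr, &va);
 *	if (vn) {
 *		... inspect va while vn->busy.lock is held ...
 *		spin_unlock(&vn->busy.lock);
 *	}
 */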

static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
{
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/*
 * This function returns the address of the parent node and of its
 * left or right link for further processing.
 *
 * Otherwise NULL is returned. In that case all further steps of
 * inserting a conflicting, overlapping range have to be declined,
 * and this is considered a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point,
	 * we end up with the parent rb_node and the correct direction
	 * - the link - where the new va->rb_node will be attached.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity checks.
		 * Warn and decline the insertion if the ranges partially
		 * or fully overlap.
		 */
		if (va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. there is
		 * no free vmap space. Normally this does not happen,
		 * but we handle the case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
__link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head, bool augment)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (augment) {
		/*
		 * Just perform a simple insertion into the tree. We do
		 * not set va->subtree_max_size to its current size before
		 * calling rb_insert_augmented(), because we populate the
		 * tree from the bottom up to the parent levels once the
		 * node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after the
		 * insertion, to let __augment_tree_propagate_from() put
		 * everything in the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}
1097153090f2SBaoquan He * It is because we populate the tree from the bottom
109868ad4a33SUladzislau Rezki (Sony) * to parent levels when the node _is_ in the tree.
109968ad4a33SUladzislau Rezki (Sony) *
110068ad4a33SUladzislau Rezki (Sony) * Therefore we set subtree_max_size to zero after insertion,
110168ad4a33SUladzislau Rezki (Sony) * to let __augment_tree_propagate_from() put everything in
110268ad4a33SUladzislau Rezki (Sony) * the correct order later on.
110368ad4a33SUladzislau Rezki (Sony) */
110468ad4a33SUladzislau Rezki (Sony) rb_insert_augmented(&va->rb_node,
110568ad4a33SUladzislau Rezki (Sony) root, &free_vmap_area_rb_augment_cb);
110668ad4a33SUladzislau Rezki (Sony) va->subtree_max_size = 0;
110768ad4a33SUladzislau Rezki (Sony) } else {
110868ad4a33SUladzislau Rezki (Sony) rb_insert_color(&va->rb_node, root);
110968ad4a33SUladzislau Rezki (Sony) }
111068ad4a33SUladzislau Rezki (Sony)
111168ad4a33SUladzislau Rezki (Sony) /* Address-sort this list */
111268ad4a33SUladzislau Rezki (Sony) list_add(&va->list, head);
111368ad4a33SUladzislau Rezki (Sony) }
111468ad4a33SUladzislau Rezki (Sony)
111568ad4a33SUladzislau Rezki (Sony) static __always_inline void
11168eb510dbSUladzislau Rezki (Sony) link_va(struct vmap_area *va, struct rb_root *root,
11178eb510dbSUladzislau Rezki (Sony) struct rb_node *parent, struct rb_node **link,
11188eb510dbSUladzislau Rezki (Sony) struct list_head *head)
11198eb510dbSUladzislau Rezki (Sony) {
11208eb510dbSUladzislau Rezki (Sony) __link_va(va, root, parent, link, head, false);
11218eb510dbSUladzislau Rezki (Sony) }
11228eb510dbSUladzislau Rezki (Sony)
11238eb510dbSUladzislau Rezki (Sony) static __always_inline void
11248eb510dbSUladzislau Rezki (Sony) link_va_augment(struct vmap_area *va, struct rb_root *root,
11258eb510dbSUladzislau Rezki (Sony) struct rb_node *parent, struct rb_node **link,
11268eb510dbSUladzislau Rezki (Sony) struct list_head *head)
11278eb510dbSUladzislau Rezki (Sony) {
11288eb510dbSUladzislau Rezki (Sony) __link_va(va, root, parent, link, head, true);
11298eb510dbSUladzislau Rezki (Sony) }
11308eb510dbSUladzislau Rezki (Sony)
11318eb510dbSUladzislau Rezki (Sony) static __always_inline void
11328eb510dbSUladzislau Rezki (Sony) __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
113368ad4a33SUladzislau Rezki (Sony) {
1134460e42d1SUladzislau Rezki (Sony) if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
1135460e42d1SUladzislau Rezki (Sony) return;
1136460e42d1SUladzislau Rezki (Sony)
11378eb510dbSUladzislau Rezki (Sony) if (augment)
113868ad4a33SUladzislau Rezki (Sony) rb_erase_augmented(&va->rb_node,
113968ad4a33SUladzislau Rezki (Sony) root, &free_vmap_area_rb_augment_cb);
114068ad4a33SUladzislau Rezki (Sony) else
114168ad4a33SUladzislau Rezki (Sony) rb_erase(&va->rb_node, root);
114268ad4a33SUladzislau Rezki (Sony)
11435d7a7c54SUladzislau Rezki (Sony) list_del_init(&va->list);
114468ad4a33SUladzislau Rezki (Sony) RB_CLEAR_NODE(&va->rb_node);
114568ad4a33SUladzislau Rezki (Sony) }
114668ad4a33SUladzislau Rezki (Sony)
11478eb510dbSUladzislau Rezki (Sony) static __always_inline void
11488eb510dbSUladzislau Rezki (Sony) unlink_va(struct vmap_area *va, struct rb_root *root)
11498eb510dbSUladzislau Rezki (Sony) {
11508eb510dbSUladzislau Rezki (Sony) __unlink_va(va, root, false);
11518eb510dbSUladzislau Rezki (Sony) }
11528eb510dbSUladzislau Rezki (Sony)
11538eb510dbSUladzislau Rezki (Sony) static __always_inline void
11548eb510dbSUladzislau Rezki (Sony) unlink_va_augment(struct vmap_area *va, struct rb_root *root)
11558eb510dbSUladzislau Rezki (Sony) {
11568eb510dbSUladzislau Rezki (Sony) __unlink_va(va, root, true);
11578eb510dbSUladzislau Rezki (Sony) }
11588eb510dbSUladzislau Rezki (Sony)
1159bb850f4dSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_PROPAGATE_CHECK
1160c3385e84SJiapeng Chong /*
1161c3385e84SJiapeng Chong * Gets called when removing the node and rotating.
1162c3385e84SJiapeng Chong */
1163c3385e84SJiapeng Chong static __always_inline unsigned long
1164c3385e84SJiapeng Chong compute_subtree_max_size(struct vmap_area *va)
1165c3385e84SJiapeng Chong {
1166c3385e84SJiapeng Chong return max3(va_size(va),
1167c3385e84SJiapeng Chong get_subtree_max_size(va->rb_node.rb_left),
1168c3385e84SJiapeng Chong get_subtree_max_size(va->rb_node.rb_right));
1169c3385e84SJiapeng Chong }
1170c3385e84SJiapeng Chong
1171bb850f4dSUladzislau Rezki (Sony) static void
1172da27c9edSUladzislau Rezki (Sony) augment_tree_propagate_check(void)
1173bb850f4dSUladzislau Rezki (Sony) {
1174bb850f4dSUladzislau Rezki (Sony) struct vmap_area *va;
1175da27c9edSUladzislau Rezki (Sony) unsigned long computed_size;
1176bb850f4dSUladzislau Rezki (Sony)
1177da27c9edSUladzislau Rezki (Sony) list_for_each_entry(va, &free_vmap_area_list, list) {
1178da27c9edSUladzislau Rezki (Sony) computed_size = compute_subtree_max_size(va);
1179da27c9edSUladzislau Rezki (Sony) if (computed_size != va->subtree_max_size)
1180bb850f4dSUladzislau Rezki (Sony) pr_emerg("tree is corrupted: %lu, %lu\n",
1181bb850f4dSUladzislau Rezki (Sony) va_size(va), va->subtree_max_size);
1182bb850f4dSUladzislau Rezki (Sony) }
1183bb850f4dSUladzislau Rezki (Sony) }
1184bb850f4dSUladzislau Rezki (Sony) #endif
1185bb850f4dSUladzislau Rezki (Sony)
118668ad4a33SUladzislau Rezki (Sony) /*
118768ad4a33SUladzislau Rezki (Sony) * This function populates subtree_max_size from the bottom to the
118868ad4a33SUladzislau Rezki (Sony) * upper levels, starting from the VA point. The propagation must be
118968ad4a33SUladzislau Rezki (Sony) * done when the VA size is modified by changing its va_start/va_end,
119068ad4a33SUladzislau Rezki (Sony) * or when a VA is newly inserted into the tree.
119168ad4a33SUladzislau Rezki (Sony) *
119268ad4a33SUladzislau Rezki (Sony) * It means that __augment_tree_propagate_from() must be called:
119368ad4a33SUladzislau Rezki (Sony) * - After a VA has been inserted into the tree (free path);
119468ad4a33SUladzislau Rezki (Sony) * - After a VA has been shrunk (allocation path);
119568ad4a33SUladzislau Rezki (Sony) * - After a VA has been increased (merging path).
119668ad4a33SUladzislau Rezki (Sony) *
119768ad4a33SUladzislau Rezki (Sony) * Please note that this does not mean that upper parent nodes
119868ad4a33SUladzislau Rezki (Sony) * and their subtree_max_size are recalculated all the way up
119968ad4a33SUladzislau Rezki (Sony) * to the root node every time.
120068ad4a33SUladzislau Rezki (Sony) *
120168ad4a33SUladzislau Rezki (Sony) *       4--8
120268ad4a33SUladzislau Rezki (Sony) *        /\
120368ad4a33SUladzislau Rezki (Sony) *       /  \
120468ad4a33SUladzislau Rezki (Sony) *      /    \
120568ad4a33SUladzislau Rezki (Sony) *    2--2  8--8
120668ad4a33SUladzislau Rezki (Sony) *
120768ad4a33SUladzislau Rezki (Sony) * For example, if we modify the node 4, shrinking it to 2, then
120868ad4a33SUladzislau Rezki (Sony) * no modification is required. If we shrink the node 2 to 1,
120968ad4a33SUladzislau Rezki (Sony) * only its subtree_max_size is updated and set to 1.
If we shrink
121068ad4a33SUladzislau Rezki (Sony) * the node 8 to 6, then its subtree_max_size is set to 6 and the
121168ad4a33SUladzislau Rezki (Sony) * parent node becomes 4--6.
121268ad4a33SUladzislau Rezki (Sony) */
121368ad4a33SUladzislau Rezki (Sony) static __always_inline void
121468ad4a33SUladzislau Rezki (Sony) augment_tree_propagate_from(struct vmap_area *va)
121568ad4a33SUladzislau Rezki (Sony) {
121668ad4a33SUladzislau Rezki (Sony) /*
121715ae144fSUladzislau Rezki (Sony) * Populate the tree from the bottom towards the root until
121815ae144fSUladzislau Rezki (Sony) * the calculated maximum available size of the checked node
121915ae144fSUladzislau Rezki (Sony) * is equal to its current one.
122068ad4a33SUladzislau Rezki (Sony) */
122115ae144fSUladzislau Rezki (Sony) free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
1222bb850f4dSUladzislau Rezki (Sony)
1223bb850f4dSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_PROPAGATE_CHECK
1224da27c9edSUladzislau Rezki (Sony) augment_tree_propagate_check();
1225bb850f4dSUladzislau Rezki (Sony) #endif
122668ad4a33SUladzislau Rezki (Sony) }
122768ad4a33SUladzislau Rezki (Sony)
122868ad4a33SUladzislau Rezki (Sony) static void
122968ad4a33SUladzislau Rezki (Sony) insert_vmap_area(struct vmap_area *va,
123068ad4a33SUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head)
123168ad4a33SUladzislau Rezki (Sony) {
123268ad4a33SUladzislau Rezki (Sony) struct rb_node **link;
123368ad4a33SUladzislau Rezki (Sony) struct rb_node *parent;
123468ad4a33SUladzislau Rezki (Sony)
123568ad4a33SUladzislau Rezki (Sony) link = find_va_links(va, root, NULL, &parent);
12369c801f61SUladzislau Rezki (Sony) if (link)
123768ad4a33SUladzislau Rezki (Sony) link_va(va, root, parent, link, head);
123868ad4a33SUladzislau Rezki (Sony) }
123968ad4a33SUladzislau Rezki (Sony)
124068ad4a33SUladzislau Rezki (Sony) static void
124168ad4a33SUladzislau Rezki (Sony) insert_vmap_area_augment(struct vmap_area *va,
124268ad4a33SUladzislau Rezki (Sony) struct rb_node *from, struct rb_root *root,
124368ad4a33SUladzislau Rezki (Sony) struct list_head *head)
124468ad4a33SUladzislau Rezki (Sony) {
124568ad4a33SUladzislau Rezki (Sony) struct rb_node **link;
124668ad4a33SUladzislau Rezki (Sony) struct rb_node *parent;
124768ad4a33SUladzislau Rezki (Sony)
124868ad4a33SUladzislau Rezki (Sony) if (from)
124968ad4a33SUladzislau Rezki (Sony) link = find_va_links(va, NULL, from, &parent);
125068ad4a33SUladzislau Rezki (Sony) else
125168ad4a33SUladzislau Rezki (Sony) link = find_va_links(va, root, NULL, &parent);
125268ad4a33SUladzislau Rezki (Sony)
12539c801f61SUladzislau Rezki (Sony) if (link) {
12548eb510dbSUladzislau Rezki (Sony) link_va_augment(va, root, parent, link, head);
125568ad4a33SUladzislau Rezki (Sony) augment_tree_propagate_from(va);
125668ad4a33SUladzislau Rezki (Sony) }
12579c801f61SUladzislau Rezki (Sony) }
125868ad4a33SUladzislau Rezki (Sony)
125968ad4a33SUladzislau Rezki (Sony) /*
126068ad4a33SUladzislau Rezki (Sony) * Merge a de-allocated chunk of VA memory with previous
126168ad4a33SUladzislau Rezki (Sony) * and next free blocks. If no coalescing is done, a new
126268ad4a33SUladzislau Rezki (Sony) * free area is inserted. If the VA has been merged, it is
126368ad4a33SUladzislau Rezki (Sony) * freed.
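*
* As a sketch with made-up addresses: freeing [0x3000:0x4000)
* between free blocks [0x2000:0x3000) and [0x4000:0x5000)
* collapses all three into a single free area [0x2000:0x5000),
* and the redundant vmap_area objects are returned to the
* kmem cache.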
12649c801f61SUladzislau Rezki (Sony) *
12659c801f61SUladzislau Rezki (Sony) * Please note, it can return NULL in case of overlapping
12669c801f61SUladzislau Rezki (Sony) * ranges, accompanied by a WARN() report. Despite this being
12679c801f61SUladzislau Rezki (Sony) * buggy behaviour, the system can stay alive and keep
12689c801f61SUladzislau Rezki (Sony) * going.
126968ad4a33SUladzislau Rezki (Sony) */
12703c5c3cfbSDaniel Axtens static __always_inline struct vmap_area *
12718eb510dbSUladzislau Rezki (Sony) __merge_or_add_vmap_area(struct vmap_area *va,
12728eb510dbSUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head, bool augment)
127368ad4a33SUladzislau Rezki (Sony) {
127468ad4a33SUladzislau Rezki (Sony) struct vmap_area *sibling;
127568ad4a33SUladzislau Rezki (Sony) struct list_head *next;
127668ad4a33SUladzislau Rezki (Sony) struct rb_node **link;
127768ad4a33SUladzislau Rezki (Sony) struct rb_node *parent;
127868ad4a33SUladzislau Rezki (Sony) bool merged = false;
127968ad4a33SUladzislau Rezki (Sony)
128068ad4a33SUladzislau Rezki (Sony) /*
128168ad4a33SUladzislau Rezki (Sony) * Find a place in the tree where the VA will potentially be
128268ad4a33SUladzislau Rezki (Sony) * inserted, unless it is merged with its sibling/siblings.
128368ad4a33SUladzislau Rezki (Sony) */
128468ad4a33SUladzislau Rezki (Sony) link = find_va_links(va, root, NULL, &parent);
12859c801f61SUladzislau Rezki (Sony) if (!link)
12869c801f61SUladzislau Rezki (Sony) return NULL;
128768ad4a33SUladzislau Rezki (Sony)
128868ad4a33SUladzislau Rezki (Sony) /*
128968ad4a33SUladzislau Rezki (Sony) * Get the next node of the VA to check if merging can be done.
129068ad4a33SUladzislau Rezki (Sony) */
129168ad4a33SUladzislau Rezki (Sony) next = get_va_next_sibling(parent, link);
129268ad4a33SUladzislau Rezki (Sony) if (unlikely(next == NULL))
129368ad4a33SUladzislau Rezki (Sony) goto insert;
129468ad4a33SUladzislau Rezki (Sony)
129568ad4a33SUladzislau Rezki (Sony) /*
129668ad4a33SUladzislau Rezki (Sony) * start            end
129768ad4a33SUladzislau Rezki (Sony) * |                |
129868ad4a33SUladzislau Rezki (Sony) * |<------VA------>|<-----Next----->|
129968ad4a33SUladzislau Rezki (Sony) * |                |
130068ad4a33SUladzislau Rezki (Sony) * start            end
130168ad4a33SUladzislau Rezki (Sony) */
130268ad4a33SUladzislau Rezki (Sony) if (next != head) {
130368ad4a33SUladzislau Rezki (Sony) sibling = list_entry(next, struct vmap_area, list);
130468ad4a33SUladzislau Rezki (Sony) if (sibling->va_start == va->va_end) {
130568ad4a33SUladzislau Rezki (Sony) sibling->va_start = va->va_start;
130668ad4a33SUladzislau Rezki (Sony)
130768ad4a33SUladzislau Rezki (Sony) /* Free vmap_area object. */
130868ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va);
130968ad4a33SUladzislau Rezki (Sony)
131068ad4a33SUladzislau Rezki (Sony) /* Point to the new merged area.
*/ 131168ad4a33SUladzislau Rezki (Sony) va = sibling; 131268ad4a33SUladzislau Rezki (Sony) merged = true; 131368ad4a33SUladzislau Rezki (Sony) } 131468ad4a33SUladzislau Rezki (Sony) } 131568ad4a33SUladzislau Rezki (Sony) 131668ad4a33SUladzislau Rezki (Sony) /* 131768ad4a33SUladzislau Rezki (Sony) * start end 131868ad4a33SUladzislau Rezki (Sony) * | | 131968ad4a33SUladzislau Rezki (Sony) * |<-----Prev----->|<------VA------>| 132068ad4a33SUladzislau Rezki (Sony) * | | 132168ad4a33SUladzislau Rezki (Sony) * start end 132268ad4a33SUladzislau Rezki (Sony) */ 132368ad4a33SUladzislau Rezki (Sony) if (next->prev != head) { 132468ad4a33SUladzislau Rezki (Sony) sibling = list_entry(next->prev, struct vmap_area, list); 132568ad4a33SUladzislau Rezki (Sony) if (sibling->va_end == va->va_start) { 13265dd78640SUladzislau Rezki (Sony) /* 13275dd78640SUladzislau Rezki (Sony) * If both neighbors are coalesced, it is important 13285dd78640SUladzislau Rezki (Sony) * to unlink the "next" node first, followed by merging 13295dd78640SUladzislau Rezki (Sony) * with "previous" one. Otherwise the tree might not be 13305dd78640SUladzislau Rezki (Sony) * fully populated if a sibling's augmented value is 13315dd78640SUladzislau Rezki (Sony) * "normalized" because of rotation operations. 13325dd78640SUladzislau Rezki (Sony) */ 133354f63d9dSUladzislau Rezki (Sony) if (merged) 13348eb510dbSUladzislau Rezki (Sony) __unlink_va(va, root, augment); 133568ad4a33SUladzislau Rezki (Sony) 13365dd78640SUladzislau Rezki (Sony) sibling->va_end = va->va_end; 13375dd78640SUladzislau Rezki (Sony) 133868ad4a33SUladzislau Rezki (Sony) /* Free vmap_area object. */ 133968ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va); 13403c5c3cfbSDaniel Axtens 13413c5c3cfbSDaniel Axtens /* Point to the new merged area. 
*/
13423c5c3cfbSDaniel Axtens va = sibling;
13433c5c3cfbSDaniel Axtens merged = true;
134468ad4a33SUladzislau Rezki (Sony) }
134568ad4a33SUladzislau Rezki (Sony) }
134668ad4a33SUladzislau Rezki (Sony)
134768ad4a33SUladzislau Rezki (Sony) insert:
13485dd78640SUladzislau Rezki (Sony) if (!merged)
13498eb510dbSUladzislau Rezki (Sony) __link_va(va, root, parent, link, head, augment);
13503c5c3cfbSDaniel Axtens
135196e2db45SUladzislau Rezki (Sony) return va;
135296e2db45SUladzislau Rezki (Sony) }
135396e2db45SUladzislau Rezki (Sony)
135496e2db45SUladzislau Rezki (Sony) static __always_inline struct vmap_area *
13558eb510dbSUladzislau Rezki (Sony) merge_or_add_vmap_area(struct vmap_area *va,
13568eb510dbSUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head)
13578eb510dbSUladzislau Rezki (Sony) {
13588eb510dbSUladzislau Rezki (Sony) return __merge_or_add_vmap_area(va, root, head, false);
13598eb510dbSUladzislau Rezki (Sony) }
13608eb510dbSUladzislau Rezki (Sony)
13618eb510dbSUladzislau Rezki (Sony) static __always_inline struct vmap_area *
136296e2db45SUladzislau Rezki (Sony) merge_or_add_vmap_area_augment(struct vmap_area *va,
136396e2db45SUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head)
136496e2db45SUladzislau Rezki (Sony) {
13658eb510dbSUladzislau Rezki (Sony) va = __merge_or_add_vmap_area(va, root, head, true);
136696e2db45SUladzislau Rezki (Sony) if (va)
13675dd78640SUladzislau Rezki (Sony) augment_tree_propagate_from(va);
136896e2db45SUladzislau Rezki (Sony)
13693c5c3cfbSDaniel Axtens return va;
137068ad4a33SUladzislau Rezki (Sony) }
137168ad4a33SUladzislau Rezki (Sony)
137268ad4a33SUladzislau Rezki (Sony) static __always_inline bool
137368ad4a33SUladzislau Rezki (Sony) is_within_this_va(struct vmap_area *va, unsigned long size,
137468ad4a33SUladzislau Rezki (Sony) unsigned long align, unsigned long vstart)
137568ad4a33SUladzislau Rezki (Sony) {
137668ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr;
137768ad4a33SUladzislau Rezki (Sony)
137868ad4a33SUladzislau Rezki (Sony) if (va->va_start > vstart)
137968ad4a33SUladzislau Rezki (Sony) nva_start_addr = ALIGN(va->va_start, align);
138068ad4a33SUladzislau Rezki (Sony) else
138168ad4a33SUladzislau Rezki (Sony) nva_start_addr = ALIGN(vstart, align);
138268ad4a33SUladzislau Rezki (Sony)
138368ad4a33SUladzislau Rezki (Sony) /* Can overflow due to a big size or alignment. */
138468ad4a33SUladzislau Rezki (Sony) if (nva_start_addr + size < nva_start_addr ||
138568ad4a33SUladzislau Rezki (Sony) nva_start_addr < vstart)
138668ad4a33SUladzislau Rezki (Sony) return false;
138768ad4a33SUladzislau Rezki (Sony)
138868ad4a33SUladzislau Rezki (Sony) return (nva_start_addr + size <= va->va_end);
138968ad4a33SUladzislau Rezki (Sony) }
139068ad4a33SUladzislau Rezki (Sony)
139168ad4a33SUladzislau Rezki (Sony) /*
139268ad4a33SUladzislau Rezki (Sony) * Find the first free block (lowest start address) in the tree
139368ad4a33SUladzislau Rezki (Sony) * that can satisfy the request given by the passed
13949333fe98SUladzislau Rezki * parameters. Please note, with an alignment bigger than PAGE_SIZE,
13959333fe98SUladzislau Rezki * the search length is adjusted to account for the worst-case alignment
13969333fe98SUladzislau Rezki * overhead.
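*
* As a worked example (made-up numbers): a request of size
* 8 KiB with a 64 KiB alignment is searched with
* length = size + align - 1, so a block that is formally big
* enough but cannot hold the aligned request is never chosen.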
139768ad4a33SUladzislau Rezki (Sony) */
139868ad4a33SUladzislau Rezki (Sony) static __always_inline struct vmap_area *
1399f9863be4SUladzislau Rezki (Sony) find_vmap_lowest_match(struct rb_root *root, unsigned long size,
1400f9863be4SUladzislau Rezki (Sony) unsigned long align, unsigned long vstart, bool adjust_search_size)
140168ad4a33SUladzislau Rezki (Sony) {
140268ad4a33SUladzislau Rezki (Sony) struct vmap_area *va;
140368ad4a33SUladzislau Rezki (Sony) struct rb_node *node;
14049333fe98SUladzislau Rezki unsigned long length;
140568ad4a33SUladzislau Rezki (Sony)
140668ad4a33SUladzislau Rezki (Sony) /* Start from the root. */
1407f9863be4SUladzislau Rezki (Sony) node = root->rb_node;
140868ad4a33SUladzislau Rezki (Sony)
14099333fe98SUladzislau Rezki /* Adjust the search size for alignment overhead. */
14109333fe98SUladzislau Rezki length = adjust_search_size ? size + align - 1 : size;
14119333fe98SUladzislau Rezki
141268ad4a33SUladzislau Rezki (Sony) while (node) {
141368ad4a33SUladzislau Rezki (Sony) va = rb_entry(node, struct vmap_area, rb_node);
141468ad4a33SUladzislau Rezki (Sony)
14159333fe98SUladzislau Rezki if (get_subtree_max_size(node->rb_left) >= length &&
141668ad4a33SUladzislau Rezki (Sony) vstart < va->va_start) {
141768ad4a33SUladzislau Rezki (Sony) node = node->rb_left;
141868ad4a33SUladzislau Rezki (Sony) } else {
141968ad4a33SUladzislau Rezki (Sony) if (is_within_this_va(va, size, align, vstart))
142068ad4a33SUladzislau Rezki (Sony) return va;
142168ad4a33SUladzislau Rezki (Sony)
142268ad4a33SUladzislau Rezki (Sony) /*
142368ad4a33SUladzislau Rezki (Sony) * It does not make sense to go deeper towards the right
142468ad4a33SUladzislau Rezki (Sony) * sub-tree if it does not have a free block that is
14259333fe98SUladzislau Rezki * equal to or bigger than the requested search length.
142668ad4a33SUladzislau Rezki (Sony) */
14279333fe98SUladzislau Rezki if (get_subtree_max_size(node->rb_right) >= length) {
142868ad4a33SUladzislau Rezki (Sony) node = node->rb_right;
142968ad4a33SUladzislau Rezki (Sony) continue;
143068ad4a33SUladzislau Rezki (Sony) }
143168ad4a33SUladzislau Rezki (Sony)
143268ad4a33SUladzislau Rezki (Sony) /*
14333806b041SAndrew Morton * OK. We roll back and find the first right sub-tree
143468ad4a33SUladzislau Rezki (Sony) * that can satisfy the search criteria. It can happen
14359f531973SUladzislau Rezki (Sony) * due to the "vstart" restriction or an alignment overhead
14369f531973SUladzislau Rezki (Sony) * that is bigger than PAGE_SIZE.
143768ad4a33SUladzislau Rezki (Sony) */
143868ad4a33SUladzislau Rezki (Sony) while ((node = rb_parent(node))) {
143968ad4a33SUladzislau Rezki (Sony) va = rb_entry(node, struct vmap_area, rb_node);
144068ad4a33SUladzislau Rezki (Sony) if (is_within_this_va(va, size, align, vstart))
144168ad4a33SUladzislau Rezki (Sony) return va;
144268ad4a33SUladzislau Rezki (Sony)
14439333fe98SUladzislau Rezki if (get_subtree_max_size(node->rb_right) >= length &&
144468ad4a33SUladzislau Rezki (Sony) vstart <= va->va_start) {
14459f531973SUladzislau Rezki (Sony) /*
14469f531973SUladzislau Rezki (Sony) * Shift the vstart forward. Please note, we update it with
14479f531973SUladzislau Rezki (Sony) * the parent's start address, adding "1", because we do not want
14489f531973SUladzislau Rezki (Sony) * to enter the same sub-tree after it has already been checked
14499f531973SUladzislau Rezki (Sony) * and no suitable free block was found there.
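* As a sketch with a made-up address: if that parent VA
* starts at 0x8000, vstart becomes 0x8001, so the
* already-checked sub-tree below it can never be entered
* again.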
14509f531973SUladzislau Rezki (Sony) */ 14519f531973SUladzislau Rezki (Sony) vstart = va->va_start + 1; 145268ad4a33SUladzislau Rezki (Sony) node = node->rb_right; 145368ad4a33SUladzislau Rezki (Sony) break; 145468ad4a33SUladzislau Rezki (Sony) } 145568ad4a33SUladzislau Rezki (Sony) } 145668ad4a33SUladzislau Rezki (Sony) } 145768ad4a33SUladzislau Rezki (Sony) } 145868ad4a33SUladzislau Rezki (Sony) 145968ad4a33SUladzislau Rezki (Sony) return NULL; 146068ad4a33SUladzislau Rezki (Sony) } 146168ad4a33SUladzislau Rezki (Sony) 1462a6cf4e0fSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK 1463a6cf4e0fSUladzislau Rezki (Sony) #include <linux/random.h> 1464a6cf4e0fSUladzislau Rezki (Sony) 1465a6cf4e0fSUladzislau Rezki (Sony) static struct vmap_area * 1466bd1264c3SSong Liu find_vmap_lowest_linear_match(struct list_head *head, unsigned long size, 1467a6cf4e0fSUladzislau Rezki (Sony) unsigned long align, unsigned long vstart) 1468a6cf4e0fSUladzislau Rezki (Sony) { 1469a6cf4e0fSUladzislau Rezki (Sony) struct vmap_area *va; 1470a6cf4e0fSUladzislau Rezki (Sony) 1471bd1264c3SSong Liu list_for_each_entry(va, head, list) { 1472a6cf4e0fSUladzislau Rezki (Sony) if (!is_within_this_va(va, size, align, vstart)) 1473a6cf4e0fSUladzislau Rezki (Sony) continue; 1474a6cf4e0fSUladzislau Rezki (Sony) 1475a6cf4e0fSUladzislau Rezki (Sony) return va; 1476a6cf4e0fSUladzislau Rezki (Sony) } 1477a6cf4e0fSUladzislau Rezki (Sony) 1478a6cf4e0fSUladzislau Rezki (Sony) return NULL; 1479a6cf4e0fSUladzislau Rezki (Sony) } 1480a6cf4e0fSUladzislau Rezki (Sony) 1481a6cf4e0fSUladzislau Rezki (Sony) static void 1482bd1264c3SSong Liu find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head, 1483bd1264c3SSong Liu unsigned long size, unsigned long align) 1484a6cf4e0fSUladzislau Rezki (Sony) { 1485a6cf4e0fSUladzislau Rezki (Sony) struct vmap_area *va_1, *va_2; 1486a6cf4e0fSUladzislau Rezki (Sony) unsigned long vstart; 1487a6cf4e0fSUladzislau Rezki (Sony) unsigned int rnd; 1488a6cf4e0fSUladzislau Rezki (Sony) 1489a6cf4e0fSUladzislau Rezki (Sony) get_random_bytes(&rnd, sizeof(rnd)); 1490a6cf4e0fSUladzislau Rezki (Sony) vstart = VMALLOC_START + rnd; 1491a6cf4e0fSUladzislau Rezki (Sony) 1492bd1264c3SSong Liu va_1 = find_vmap_lowest_match(root, size, align, vstart, false); 1493bd1264c3SSong Liu va_2 = find_vmap_lowest_linear_match(head, size, align, vstart); 1494a6cf4e0fSUladzislau Rezki (Sony) 1495a6cf4e0fSUladzislau Rezki (Sony) if (va_1 != va_2) 1496a6cf4e0fSUladzislau Rezki (Sony) pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n", 1497a6cf4e0fSUladzislau Rezki (Sony) va_1, va_2, vstart); 1498a6cf4e0fSUladzislau Rezki (Sony) } 1499a6cf4e0fSUladzislau Rezki (Sony) #endif 1500a6cf4e0fSUladzislau Rezki (Sony) 150168ad4a33SUladzislau Rezki (Sony) enum fit_type { 150268ad4a33SUladzislau Rezki (Sony) NOTHING_FIT = 0, 150368ad4a33SUladzislau Rezki (Sony) FL_FIT_TYPE = 1, /* full fit */ 150468ad4a33SUladzislau Rezki (Sony) LE_FIT_TYPE = 2, /* left edge fit */ 150568ad4a33SUladzislau Rezki (Sony) RE_FIT_TYPE = 3, /* right edge fit */ 150668ad4a33SUladzislau Rezki (Sony) NE_FIT_TYPE = 4 /* no edge fit */ 150768ad4a33SUladzislau Rezki (Sony) }; 150868ad4a33SUladzislau Rezki (Sony) 150968ad4a33SUladzislau Rezki (Sony) static __always_inline enum fit_type 151068ad4a33SUladzislau Rezki (Sony) classify_va_fit_type(struct vmap_area *va, 151168ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr, unsigned long size) 151268ad4a33SUladzislau Rezki (Sony) { 151368ad4a33SUladzislau Rezki (Sony) enum fit_type type; 
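/*
* Illustration with made-up addresses: within a free VA
* [0x1000:0x9000), a request at 0x1000 of size 0x8000 is a
* full fit (FL), at 0x1000 of size 0x2000 a left-edge fit
* (LE), one ending exactly at 0x9000 a right-edge fit (RE),
* and one strictly inside a no-edge fit (NE).
*/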
151468ad4a33SUladzislau Rezki (Sony) 151568ad4a33SUladzislau Rezki (Sony) /* Check if it is within VA. */ 151668ad4a33SUladzislau Rezki (Sony) if (nva_start_addr < va->va_start || 151768ad4a33SUladzislau Rezki (Sony) nva_start_addr + size > va->va_end) 151868ad4a33SUladzislau Rezki (Sony) return NOTHING_FIT; 151968ad4a33SUladzislau Rezki (Sony) 152068ad4a33SUladzislau Rezki (Sony) /* Now classify. */ 152168ad4a33SUladzislau Rezki (Sony) if (va->va_start == nva_start_addr) { 152268ad4a33SUladzislau Rezki (Sony) if (va->va_end == nva_start_addr + size) 152368ad4a33SUladzislau Rezki (Sony) type = FL_FIT_TYPE; 152468ad4a33SUladzislau Rezki (Sony) else 152568ad4a33SUladzislau Rezki (Sony) type = LE_FIT_TYPE; 152668ad4a33SUladzislau Rezki (Sony) } else if (va->va_end == nva_start_addr + size) { 152768ad4a33SUladzislau Rezki (Sony) type = RE_FIT_TYPE; 152868ad4a33SUladzislau Rezki (Sony) } else { 152968ad4a33SUladzislau Rezki (Sony) type = NE_FIT_TYPE; 153068ad4a33SUladzislau Rezki (Sony) } 153168ad4a33SUladzislau Rezki (Sony) 153268ad4a33SUladzislau Rezki (Sony) return type; 153368ad4a33SUladzislau Rezki (Sony) } 153468ad4a33SUladzislau Rezki (Sony) 153568ad4a33SUladzislau Rezki (Sony) static __always_inline int 15365b75b8e1SUladzislau Rezki (Sony) va_clip(struct rb_root *root, struct list_head *head, 1537f9863be4SUladzislau Rezki (Sony) struct vmap_area *va, unsigned long nva_start_addr, 1538f9863be4SUladzislau Rezki (Sony) unsigned long size) 153968ad4a33SUladzislau Rezki (Sony) { 15402c929233SArnd Bergmann struct vmap_area *lva = NULL; 15411b23ff80SBaoquan He enum fit_type type = classify_va_fit_type(va, nva_start_addr, size); 154268ad4a33SUladzislau Rezki (Sony) 154368ad4a33SUladzislau Rezki (Sony) if (type == FL_FIT_TYPE) { 154468ad4a33SUladzislau Rezki (Sony) /* 154568ad4a33SUladzislau Rezki (Sony) * No need to split VA, it fully fits. 154668ad4a33SUladzislau Rezki (Sony) * 154768ad4a33SUladzislau Rezki (Sony) * | | 154868ad4a33SUladzislau Rezki (Sony) * V NVA V 154968ad4a33SUladzislau Rezki (Sony) * |---------------| 155068ad4a33SUladzislau Rezki (Sony) */ 1551f9863be4SUladzislau Rezki (Sony) unlink_va_augment(va, root); 155268ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va); 155368ad4a33SUladzislau Rezki (Sony) } else if (type == LE_FIT_TYPE) { 155468ad4a33SUladzislau Rezki (Sony) /* 155568ad4a33SUladzislau Rezki (Sony) * Split left edge of fit VA. 155668ad4a33SUladzislau Rezki (Sony) * 155768ad4a33SUladzislau Rezki (Sony) * | | 155868ad4a33SUladzislau Rezki (Sony) * V NVA V R 155968ad4a33SUladzislau Rezki (Sony) * |-------|-------| 156068ad4a33SUladzislau Rezki (Sony) */ 156168ad4a33SUladzislau Rezki (Sony) va->va_start += size; 156268ad4a33SUladzislau Rezki (Sony) } else if (type == RE_FIT_TYPE) { 156368ad4a33SUladzislau Rezki (Sony) /* 156468ad4a33SUladzislau Rezki (Sony) * Split right edge of fit VA. 156568ad4a33SUladzislau Rezki (Sony) * 156668ad4a33SUladzislau Rezki (Sony) * | | 156768ad4a33SUladzislau Rezki (Sony) * L V NVA V 156868ad4a33SUladzislau Rezki (Sony) * |-------|-------| 156968ad4a33SUladzislau Rezki (Sony) */ 157068ad4a33SUladzislau Rezki (Sony) va->va_end = nva_start_addr; 157168ad4a33SUladzislau Rezki (Sony) } else if (type == NE_FIT_TYPE) { 157268ad4a33SUladzislau Rezki (Sony) /* 157368ad4a33SUladzislau Rezki (Sony) * Split no edge of fit VA. 
157468ad4a33SUladzislau Rezki (Sony) *
157568ad4a33SUladzislau Rezki (Sony) *     |       |
157668ad4a33SUladzislau Rezki (Sony) *   L V  NVA  V R
157768ad4a33SUladzislau Rezki (Sony) * |---|-------|---|
157868ad4a33SUladzislau Rezki (Sony) */
157982dd23e8SUladzislau Rezki (Sony) lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
158082dd23e8SUladzislau Rezki (Sony) if (unlikely(!lva)) {
158182dd23e8SUladzislau Rezki (Sony) /*
158282dd23e8SUladzislau Rezki (Sony) * For the percpu allocator we do not do any pre-allocation
158382dd23e8SUladzislau Rezki (Sony) * and leave it as it is. The reason is that it most likely
158482dd23e8SUladzislau Rezki (Sony) * never ends up with NE_FIT_TYPE splitting. In case of
158582dd23e8SUladzislau Rezki (Sony) * percpu allocations, offsets and sizes are aligned to a
158682dd23e8SUladzislau Rezki (Sony) * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
158782dd23e8SUladzislau Rezki (Sony) * are its main fitting cases.
158882dd23e8SUladzislau Rezki (Sony) *
158982dd23e8SUladzislau Rezki (Sony) * There are a few exceptions though; one example is the
159082dd23e8SUladzislau Rezki (Sony) * first allocation (early boot-up), when we have "one"
159182dd23e8SUladzislau Rezki (Sony) * big free space that has to be split.
1592060650a2SUladzislau Rezki (Sony) *
1593060650a2SUladzislau Rezki (Sony) * Also we can hit this path in case of regular "vmap"
1594060650a2SUladzislau Rezki (Sony) * allocations, if "this" current CPU was not preloaded.
1595060650a2SUladzislau Rezki (Sony) * See the comment in alloc_vmap_area() for why. If so, then
1596060650a2SUladzislau Rezki (Sony) * GFP_NOWAIT is used instead to get an extra object for
1597060650a2SUladzislau Rezki (Sony) * split purposes. That is rare and most of the time does
1598060650a2SUladzislau Rezki (Sony) * not occur.
1599060650a2SUladzislau Rezki (Sony) *
1600060650a2SUladzislau Rezki (Sony) * What happens if an allocation fails? Basically, an
1601060650a2SUladzislau Rezki (Sony) * "overflow" path is triggered to purge lazily freed
1602060650a2SUladzislau Rezki (Sony) * areas to free some memory, then the "retry" path is
1603060650a2SUladzislau Rezki (Sony) * triggered to repeat one more time. See more details
1604060650a2SUladzislau Rezki (Sony) * in the alloc_vmap_area() function.
160582dd23e8SUladzislau Rezki (Sony) */
160668ad4a33SUladzislau Rezki (Sony) lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
160782dd23e8SUladzislau Rezki (Sony) if (!lva)
160868ad4a33SUladzislau Rezki (Sony) return -1;
160982dd23e8SUladzislau Rezki (Sony) }
161068ad4a33SUladzislau Rezki (Sony)
161168ad4a33SUladzislau Rezki (Sony) /*
161268ad4a33SUladzislau Rezki (Sony) * Build the remainder.
161368ad4a33SUladzislau Rezki (Sony) */
161468ad4a33SUladzislau Rezki (Sony) lva->va_start = va->va_start;
161568ad4a33SUladzislau Rezki (Sony) lva->va_end = nva_start_addr;
161668ad4a33SUladzislau Rezki (Sony)
161768ad4a33SUladzislau Rezki (Sony) /*
161868ad4a33SUladzislau Rezki (Sony) * Shrink this VA to the remaining size.
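* For instance (made-up addresses): carving NVA
* [0x3000:0x5000) out of the free VA [0x1000:0x8000)
* leaves lva = [0x1000:0x3000) and shrinks this VA
* to [0x5000:0x8000).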
161968ad4a33SUladzislau Rezki (Sony) */
162068ad4a33SUladzislau Rezki (Sony) va->va_start = nva_start_addr + size;
162168ad4a33SUladzislau Rezki (Sony) } else {
162268ad4a33SUladzislau Rezki (Sony) return -1;
162368ad4a33SUladzislau Rezki (Sony) }
162468ad4a33SUladzislau Rezki (Sony)
162568ad4a33SUladzislau Rezki (Sony) if (type != FL_FIT_TYPE) {
162668ad4a33SUladzislau Rezki (Sony) augment_tree_propagate_from(va);
162768ad4a33SUladzislau Rezki (Sony)
16282c929233SArnd Bergmann if (lva) /* type == NE_FIT_TYPE */
1629f9863be4SUladzislau Rezki (Sony) insert_vmap_area_augment(lva, &va->rb_node, root, head);
163068ad4a33SUladzislau Rezki (Sony) }
163168ad4a33SUladzislau Rezki (Sony)
163268ad4a33SUladzislau Rezki (Sony) return 0;
163368ad4a33SUladzislau Rezki (Sony) }
163468ad4a33SUladzislau Rezki (Sony)
163538f6b9afSUladzislau Rezki (Sony) static unsigned long
163638f6b9afSUladzislau Rezki (Sony) va_alloc(struct vmap_area *va,
163738f6b9afSUladzislau Rezki (Sony) struct rb_root *root, struct list_head *head,
163838f6b9afSUladzislau Rezki (Sony) unsigned long size, unsigned long align,
163938f6b9afSUladzislau Rezki (Sony) unsigned long vstart, unsigned long vend)
164038f6b9afSUladzislau Rezki (Sony) {
164138f6b9afSUladzislau Rezki (Sony) unsigned long nva_start_addr;
164238f6b9afSUladzislau Rezki (Sony) int ret;
164338f6b9afSUladzislau Rezki (Sony)
164438f6b9afSUladzislau Rezki (Sony) if (va->va_start > vstart)
164538f6b9afSUladzislau Rezki (Sony) nva_start_addr = ALIGN(va->va_start, align);
164638f6b9afSUladzislau Rezki (Sony) else
164738f6b9afSUladzislau Rezki (Sony) nva_start_addr = ALIGN(vstart, align);
164838f6b9afSUladzislau Rezki (Sony)
164938f6b9afSUladzislau Rezki (Sony) /* Check the "vend" restriction. */
165038f6b9afSUladzislau Rezki (Sony) if (nva_start_addr + size > vend)
165138f6b9afSUladzislau Rezki (Sony) return vend;
165238f6b9afSUladzislau Rezki (Sony)
165338f6b9afSUladzislau Rezki (Sony) /* Update the free vmap_area. */
16545b75b8e1SUladzislau Rezki (Sony) ret = va_clip(root, head, va, nva_start_addr, size);
165538f6b9afSUladzislau Rezki (Sony) if (WARN_ON_ONCE(ret))
165638f6b9afSUladzislau Rezki (Sony) return vend;
165738f6b9afSUladzislau Rezki (Sony)
165838f6b9afSUladzislau Rezki (Sony) return nva_start_addr;
165938f6b9afSUladzislau Rezki (Sony) }
166038f6b9afSUladzislau Rezki (Sony)
166168ad4a33SUladzislau Rezki (Sony) /*
166268ad4a33SUladzislau Rezki (Sony) * Returns the start address of the newly allocated area on success.
166368ad4a33SUladzislau Rezki (Sony) * Otherwise "vend" is returned to indicate failure.
166468ad4a33SUladzislau Rezki (Sony) */
166568ad4a33SUladzislau Rezki (Sony) static __always_inline unsigned long
1666f9863be4SUladzislau Rezki (Sony) __alloc_vmap_area(struct rb_root *root, struct list_head *head,
1667f9863be4SUladzislau Rezki (Sony) unsigned long size, unsigned long align,
1668cacca6baSUladzislau Rezki (Sony) unsigned long vstart, unsigned long vend)
166968ad4a33SUladzislau Rezki (Sony) {
16709333fe98SUladzislau Rezki bool adjust_search_size = true;
167168ad4a33SUladzislau Rezki (Sony) unsigned long nva_start_addr;
167268ad4a33SUladzislau Rezki (Sony) struct vmap_area *va;
167368ad4a33SUladzislau Rezki (Sony)
16749333fe98SUladzislau Rezki /*
16759333fe98SUladzislau Rezki * Do not adjust when:
16769333fe98SUladzislau Rezki *   a) align <= PAGE_SIZE, because it does not make any sense.
16779333fe98SUladzislau Rezki *    All blocks (their start addresses) are at least PAGE_SIZE
16789333fe98SUladzislau Rezki *    aligned anyway;
16799333fe98SUladzislau Rezki *   b) a short range where the requested size exactly corresponds
16809333fe98SUladzislau Rezki *    to the specified [vstart:vend] interval and the alignment > PAGE_SIZE.
16819333fe98SUladzislau Rezki *    With an adjusted search length an allocation would not succeed.
16829333fe98SUladzislau Rezki */
16839333fe98SUladzislau Rezki if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
16849333fe98SUladzislau Rezki adjust_search_size = false;
16859333fe98SUladzislau Rezki
1686f9863be4SUladzislau Rezki (Sony) va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
168768ad4a33SUladzislau Rezki (Sony) if (unlikely(!va))
168868ad4a33SUladzislau Rezki (Sony) return vend;
168968ad4a33SUladzislau Rezki (Sony)
169038f6b9afSUladzislau Rezki (Sony) nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
169138f6b9afSUladzislau Rezki (Sony) if (nva_start_addr == vend)
169268ad4a33SUladzislau Rezki (Sony) return vend;
169368ad4a33SUladzislau Rezki (Sony)
1694a6cf4e0fSUladzislau Rezki (Sony) #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1695bd1264c3SSong Liu find_vmap_lowest_match_check(root, head, size, align);
1696a6cf4e0fSUladzislau Rezki (Sony) #endif
1697a6cf4e0fSUladzislau Rezki (Sony)
169868ad4a33SUladzislau Rezki (Sony) return nva_start_addr;
169968ad4a33SUladzislau Rezki (Sony) }
17004da56b99SChris Wilson
1701db64fe02SNick Piggin /*
1702d98c9e83SAndrey Ryabinin * Free a region of KVA allocated by alloc_vmap_area
1703d98c9e83SAndrey Ryabinin */
1704d98c9e83SAndrey Ryabinin static void free_vmap_area(struct vmap_area *va)
1705d98c9e83SAndrey Ryabinin {
1706d0936029SUladzislau Rezki (Sony) struct vmap_node *vn = addr_to_node(va->va_start);
1707d0936029SUladzislau Rezki (Sony)
1708d98c9e83SAndrey Ryabinin /*
1709d98c9e83SAndrey Ryabinin * Remove from the busy tree/list.
1710d98c9e83SAndrey Ryabinin */
1711d0936029SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock);
1712d0936029SUladzislau Rezki (Sony) unlink_va(va, &vn->busy.root);
1713d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock);
1714d98c9e83SAndrey Ryabinin
1715d98c9e83SAndrey Ryabinin /*
1716d98c9e83SAndrey Ryabinin * Insert/Merge it back to the free tree/list.
1717d98c9e83SAndrey Ryabinin */
1718d98c9e83SAndrey Ryabinin spin_lock(&free_vmap_area_lock);
171996e2db45SUladzislau Rezki (Sony) merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
1720d98c9e83SAndrey Ryabinin spin_unlock(&free_vmap_area_lock);
1721d98c9e83SAndrey Ryabinin }
1722d98c9e83SAndrey Ryabinin
1723187f8cc4SUladzislau Rezki (Sony) static inline void
1724187f8cc4SUladzislau Rezki (Sony) preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
1725187f8cc4SUladzislau Rezki (Sony) {
1726187f8cc4SUladzislau Rezki (Sony) struct vmap_area *va = NULL;
1727187f8cc4SUladzislau Rezki (Sony)
1728187f8cc4SUladzislau Rezki (Sony) /*
1729187f8cc4SUladzislau Rezki (Sony) * Preload this CPU with one extra vmap_area object. It is used
1730187f8cc4SUladzislau Rezki (Sony) * when the fit type of a free area is NE_FIT_TYPE. It guarantees
1731187f8cc4SUladzislau Rezki (Sony) * that a CPU that does an allocation is preloaded.
1732187f8cc4SUladzislau Rezki (Sony) *
1733187f8cc4SUladzislau Rezki (Sony) * We do it in a non-atomic context, which allows us to use a more
1734187f8cc4SUladzislau Rezki (Sony) * permissive allocation mask and thus be more stable under low-memory
1735187f8cc4SUladzislau Rezki (Sony) * conditions and high memory pressure.
1736187f8cc4SUladzislau Rezki (Sony) */
1737187f8cc4SUladzislau Rezki (Sony) if (!this_cpu_read(ne_fit_preload_node))
1738187f8cc4SUladzislau Rezki (Sony) va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1739187f8cc4SUladzislau Rezki (Sony)
1740187f8cc4SUladzislau Rezki (Sony) spin_lock(lock);
1741187f8cc4SUladzislau Rezki (Sony)
1742187f8cc4SUladzislau Rezki (Sony) if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
1743187f8cc4SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va);
1744187f8cc4SUladzislau Rezki (Sony) }
1745187f8cc4SUladzislau Rezki (Sony)
174672210662SUladzislau Rezki (Sony) static struct vmap_pool *
174772210662SUladzislau Rezki (Sony) size_to_va_pool(struct vmap_node *vn, unsigned long size)
174872210662SUladzislau Rezki (Sony) {
174972210662SUladzislau Rezki (Sony) unsigned int idx = (size - 1) / PAGE_SIZE;
175072210662SUladzislau Rezki (Sony)
175172210662SUladzislau Rezki (Sony) if (idx < MAX_VA_SIZE_PAGES)
175272210662SUladzislau Rezki (Sony) return &vn->pool[idx];
175372210662SUladzislau Rezki (Sony)
175472210662SUladzislau Rezki (Sony) return NULL;
175572210662SUladzislau Rezki (Sony) }
175672210662SUladzislau Rezki (Sony)
175772210662SUladzislau Rezki (Sony) static bool
175872210662SUladzislau Rezki (Sony) node_pool_add_va(struct vmap_node *n, struct vmap_area *va)
175972210662SUladzislau Rezki (Sony) {
176072210662SUladzislau Rezki (Sony) struct vmap_pool *vp;
176172210662SUladzislau Rezki (Sony)
176272210662SUladzislau Rezki (Sony) vp = size_to_va_pool(n, va_size(va));
176372210662SUladzislau Rezki (Sony) if (!vp)
176472210662SUladzislau Rezki (Sony) return false;
176572210662SUladzislau Rezki (Sony)
176672210662SUladzislau Rezki (Sony) spin_lock(&n->pool_lock);
176772210662SUladzislau Rezki (Sony) list_add(&va->list, &vp->head);
176872210662SUladzislau Rezki (Sony) WRITE_ONCE(vp->len, vp->len + 1);
176972210662SUladzislau Rezki (Sony) spin_unlock(&n->pool_lock);
177072210662SUladzislau Rezki (Sony)
177172210662SUladzislau Rezki (Sony) return true;
177272210662SUladzislau Rezki (Sony) }
177372210662SUladzislau Rezki (Sony)
177472210662SUladzislau Rezki (Sony) static struct vmap_area *
177572210662SUladzislau Rezki (Sony) node_pool_del_va(struct vmap_node *vn, unsigned long size,
177672210662SUladzislau Rezki (Sony) unsigned long align, unsigned long vstart,
177772210662SUladzislau Rezki (Sony) unsigned long vend)
177872210662SUladzislau Rezki (Sony) {
177972210662SUladzislau Rezki (Sony) struct vmap_area *va = NULL;
178072210662SUladzislau Rezki (Sony) struct vmap_pool *vp;
178172210662SUladzislau Rezki (Sony) int err = 0;
178272210662SUladzislau Rezki (Sony)
178372210662SUladzislau Rezki (Sony) vp = size_to_va_pool(vn, size);
178472210662SUladzislau Rezki (Sony) if (!vp || list_empty(&vp->head))
178572210662SUladzislau Rezki (Sony) return NULL;
178672210662SUladzislau Rezki (Sony)
178772210662SUladzislau Rezki (Sony) spin_lock(&vn->pool_lock);
178872210662SUladzislau Rezki (Sony) if (!list_empty(&vp->head)) {
178972210662SUladzislau Rezki (Sony) va = list_first_entry(&vp->head, struct vmap_area, list);
179072210662SUladzislau Rezki (Sony)
179172210662SUladzislau Rezki (Sony) if (IS_ALIGNED(va->va_start, align)) {
179272210662SUladzislau Rezki (Sony) /*
179372210662SUladzislau Rezki (Sony) * Do some sanity checks and emit a warning
179472210662SUladzislau Rezki (Sony) * if one of the checks below detects an error.
179572210662SUladzislau Rezki (Sony) */
179672210662SUladzislau Rezki (Sony) err |= (va_size(va) != size);
179772210662SUladzislau Rezki (Sony) err |= (va->va_start < vstart);
179872210662SUladzislau Rezki (Sony) err |= (va->va_end > vend);
179972210662SUladzislau Rezki (Sony)
180072210662SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(err)) {
180172210662SUladzislau Rezki (Sony) list_del_init(&va->list);
180272210662SUladzislau Rezki (Sony) WRITE_ONCE(vp->len, vp->len - 1);
180372210662SUladzislau Rezki (Sony) } else {
180472210662SUladzislau Rezki (Sony) va = NULL;
180572210662SUladzislau Rezki (Sony) }
180672210662SUladzislau Rezki (Sony) } else {
180772210662SUladzislau Rezki (Sony) list_move_tail(&va->list, &vp->head);
180872210662SUladzislau Rezki (Sony) va = NULL;
180972210662SUladzislau Rezki (Sony) }
181072210662SUladzislau Rezki (Sony) }
181172210662SUladzislau Rezki (Sony) spin_unlock(&vn->pool_lock);
181272210662SUladzislau Rezki (Sony)
181372210662SUladzislau Rezki (Sony) return va;
181472210662SUladzislau Rezki (Sony) }
181572210662SUladzislau Rezki (Sony)
181672210662SUladzislau Rezki (Sony) static struct vmap_area *
181772210662SUladzislau Rezki (Sony) node_alloc(unsigned long size, unsigned long align,
181872210662SUladzislau Rezki (Sony) unsigned long vstart, unsigned long vend,
181972210662SUladzislau Rezki (Sony) unsigned long *addr, unsigned int *vn_id)
182072210662SUladzislau Rezki (Sony) {
182172210662SUladzislau Rezki (Sony) struct vmap_area *va;
182272210662SUladzislau Rezki (Sony)
182372210662SUladzislau Rezki (Sony) *vn_id = 0;
182472210662SUladzislau Rezki (Sony) *addr = vend;
182572210662SUladzislau Rezki (Sony)
182672210662SUladzislau Rezki (Sony) /*
182772210662SUladzislau Rezki (Sony) * Fall back to the global heap if the request is not for the
182872210662SUladzislau Rezki (Sony) * vmalloc range or there is only one node.
182972210662SUladzislau Rezki (Sony) */
183072210662SUladzislau Rezki (Sony) if (vstart != VMALLOC_START || vend != VMALLOC_END ||
183172210662SUladzislau Rezki (Sony) nr_vmap_nodes == 1)
183272210662SUladzislau Rezki (Sony) return NULL;
183372210662SUladzislau Rezki (Sony)
183472210662SUladzislau Rezki (Sony) *vn_id = raw_smp_processor_id() % nr_vmap_nodes;
183572210662SUladzislau Rezki (Sony) va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend);
183672210662SUladzislau Rezki (Sony) *vn_id = encode_vn_id(*vn_id);
183772210662SUladzislau Rezki (Sony)
183872210662SUladzislau Rezki (Sony) if (va)
183972210662SUladzislau Rezki (Sony) *addr = va->va_start;
184072210662SUladzislau Rezki (Sony)
184172210662SUladzislau Rezki (Sony) return va;
184272210662SUladzislau Rezki (Sony) }
184372210662SUladzislau Rezki (Sony)
1844d98c9e83SAndrey Ryabinin /*
1845db64fe02SNick Piggin * Allocate a region of KVA of the specified size and alignment, within the
1846db64fe02SNick Piggin * vstart and vend.
1847db64fe02SNick Piggin */
1848db64fe02SNick Piggin static struct vmap_area *alloc_vmap_area(unsigned long size,
1849db64fe02SNick Piggin unsigned long align,
1850db64fe02SNick Piggin unsigned long vstart, unsigned long vend,
1851869176a0SBaoquan He int node, gfp_t gfp_mask,
1852869176a0SBaoquan He unsigned long va_flags)
1853db64fe02SNick Piggin {
1854d0936029SUladzislau Rezki (Sony) struct vmap_node *vn;
1855187f8cc4SUladzislau Rezki (Sony) struct vmap_area *va;
185612e376a6SUladzislau Rezki (Sony) unsigned long freed;
18571da177e4SLinus Torvalds unsigned long addr;
185872210662SUladzislau Rezki (Sony) unsigned int vn_id;
1859db64fe02SNick Piggin int purged = 0;
1860d98c9e83SAndrey Ryabinin int ret;
1861db64fe02SNick Piggin
18627e4a32c0SHyunmin Lee if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
18637e4a32c0SHyunmin Lee return ERR_PTR(-EINVAL);
1864db64fe02SNick Piggin
186568ad4a33SUladzislau Rezki (Sony) if (unlikely(!vmap_initialized))
186668ad4a33SUladzislau Rezki (Sony) return ERR_PTR(-EBUSY);
186768ad4a33SUladzislau Rezki (Sony)
18685803ed29SChristoph Hellwig might_sleep();
186972210662SUladzislau Rezki (Sony)
187072210662SUladzislau Rezki (Sony) /*
187172210662SUladzislau Rezki (Sony) * Even if the VA ends up being obtained from the global heap
187272210662SUladzislau Rezki (Sony) * (i.e. the per-node pool lookup fails here), it is still marked
187372210662SUladzislau Rezki (Sony) * with this "vn_id", so it is returned to this node's pool later.
187472210662SUladzislau Rezki (Sony) * This makes it possible to populate the pools based on user demand.
187572210662SUladzislau Rezki (Sony) *
187672210662SUladzislau Rezki (Sony) * On success a ready to go VA is returned.
187772210662SUladzislau Rezki (Sony) */
187872210662SUladzislau Rezki (Sony) va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
187972210662SUladzislau Rezki (Sony) if (!va) {
1880f07116d7SUladzislau Rezki (Sony) gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
18814da56b99SChris Wilson
1882f07116d7SUladzislau Rezki (Sony) va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1883db64fe02SNick Piggin if (unlikely(!va))
1884db64fe02SNick Piggin return ERR_PTR(-ENOMEM);
1885db64fe02SNick Piggin
18867f88f88fSCatalin Marinas /*
18877f88f88fSCatalin Marinas * Only scan the relevant parts containing pointers to other objects
18887f88f88fSCatalin Marinas * to avoid false negatives.
18897f88f88fSCatalin Marinas */
1890f07116d7SUladzislau Rezki (Sony) kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
189196aa8437SUladzislau Rezki (Sony) }
18927f88f88fSCatalin Marinas
1893db64fe02SNick Piggin retry:
189472210662SUladzislau Rezki (Sony) if (addr == vend) {
1895187f8cc4SUladzislau Rezki (Sony) preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
1896f9863be4SUladzislau Rezki (Sony) addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
1897f9863be4SUladzislau Rezki (Sony) size, align, vstart, vend);
1898187f8cc4SUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock);
189972210662SUladzislau Rezki (Sony) }
190068ad4a33SUladzislau Rezki (Sony)
1901cf243da6SUladzislau Rezki (Sony) trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
1902cf243da6SUladzislau Rezki (Sony)
190389699605SNick Piggin /*
190468ad4a33SUladzislau Rezki (Sony) * If an allocation fails, the "vend" address is
190568ad4a33SUladzislau Rezki (Sony) * returned. Therefore trigger the overflow path.
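* (The overflow path purges lazily freed areas and retries
* before giving up; see the "overflow" label below.)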
190689699605SNick Piggin */ 190768ad4a33SUladzislau Rezki (Sony) if (unlikely(addr == vend)) 190889699605SNick Piggin goto overflow; 190989699605SNick Piggin 191089699605SNick Piggin va->va_start = addr; 191189699605SNick Piggin va->va_end = addr + size; 1912688fcbfcSPengfei Li va->vm = NULL; 191372210662SUladzislau Rezki (Sony) va->flags = (va_flags | vn_id); 191468ad4a33SUladzislau Rezki (Sony) 1915d0936029SUladzislau Rezki (Sony) vn = addr_to_node(va->va_start); 1916d0936029SUladzislau Rezki (Sony) 1917d0936029SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock); 1918d0936029SUladzislau Rezki (Sony) insert_vmap_area(va, &vn->busy.root, &vn->busy.head); 1919d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 192089699605SNick Piggin 192161e16557SWang Xiaoqiang BUG_ON(!IS_ALIGNED(va->va_start, align)); 192289699605SNick Piggin BUG_ON(va->va_start < vstart); 192389699605SNick Piggin BUG_ON(va->va_end > vend); 192489699605SNick Piggin 1925d98c9e83SAndrey Ryabinin ret = kasan_populate_vmalloc(addr, size); 1926d98c9e83SAndrey Ryabinin if (ret) { 1927d98c9e83SAndrey Ryabinin free_vmap_area(va); 1928d98c9e83SAndrey Ryabinin return ERR_PTR(ret); 1929d98c9e83SAndrey Ryabinin } 1930d98c9e83SAndrey Ryabinin 193189699605SNick Piggin return va; 193289699605SNick Piggin 19337766970cSNick Piggin overflow: 1934db64fe02SNick Piggin if (!purged) { 193577e50af0SThomas Gleixner reclaim_and_purge_vmap_areas(); 1936db64fe02SNick Piggin purged = 1; 1937db64fe02SNick Piggin goto retry; 1938db64fe02SNick Piggin } 19394da56b99SChris Wilson 194012e376a6SUladzislau Rezki (Sony) freed = 0; 19414da56b99SChris Wilson blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); 194212e376a6SUladzislau Rezki (Sony) 19434da56b99SChris Wilson if (freed > 0) { 19444da56b99SChris Wilson purged = 0; 19454da56b99SChris Wilson goto retry; 19464da56b99SChris Wilson } 19474da56b99SChris Wilson 194803497d76SFlorian Fainelli if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) 1949756a025fSJoe Perches pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n", 1950756a025fSJoe Perches size); 195168ad4a33SUladzislau Rezki (Sony) 195268ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, va); 1953db64fe02SNick Piggin return ERR_PTR(-EBUSY); 1954db64fe02SNick Piggin } 1955db64fe02SNick Piggin 19564da56b99SChris Wilson int register_vmap_purge_notifier(struct notifier_block *nb) 19574da56b99SChris Wilson { 19584da56b99SChris Wilson return blocking_notifier_chain_register(&vmap_notify_list, nb); 19594da56b99SChris Wilson } 19604da56b99SChris Wilson EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); 19614da56b99SChris Wilson 19624da56b99SChris Wilson int unregister_vmap_purge_notifier(struct notifier_block *nb) 19634da56b99SChris Wilson { 19644da56b99SChris Wilson return blocking_notifier_chain_unregister(&vmap_notify_list, nb); 19654da56b99SChris Wilson } 19664da56b99SChris Wilson EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); 19674da56b99SChris Wilson 1968db64fe02SNick Piggin /* 1969db64fe02SNick Piggin * lazy_max_pages is the maximum amount of virtual address space we gather up 1970db64fe02SNick Piggin * before attempting to purge with a TLB flush. 1971db64fe02SNick Piggin * 1972db64fe02SNick Piggin * There is a tradeoff here: a larger number will cover more kernel page tables 1973db64fe02SNick Piggin * and take slightly longer to purge, but it will linearly reduce the number of 1974db64fe02SNick Piggin * global TLB flushes that must be performed. 
It would seem natural to scale 1975db64fe02SNick Piggin * this number up linearly with the number of CPUs (because vmapping activity 1976db64fe02SNick Piggin * could also scale linearly with the number of CPUs), however it is likely 1977db64fe02SNick Piggin * that in practice, workloads might be constrained in other ways that mean 1978db64fe02SNick Piggin * vmap activity will not scale linearly with CPUs. Also, I want to be 1979db64fe02SNick Piggin * conservative and not introduce a big latency on huge systems, so go with 1980db64fe02SNick Piggin * a less aggressive log scale. It will still be an improvement over the old 1981db64fe02SNick Piggin * code, and it will be simple to change the scale factor if we find that it 1982db64fe02SNick Piggin * becomes a problem on bigger systems. 1983db64fe02SNick Piggin */ 1984db64fe02SNick Piggin static unsigned long lazy_max_pages(void) 1985db64fe02SNick Piggin { 1986db64fe02SNick Piggin unsigned int log; 1987db64fe02SNick Piggin 1988db64fe02SNick Piggin log = fls(num_online_cpus()); 1989db64fe02SNick Piggin 1990db64fe02SNick Piggin return log * (32UL * 1024 * 1024 / PAGE_SIZE); 1991db64fe02SNick Piggin } 1992db64fe02SNick Piggin 19934d36e6f8SUladzislau Rezki (Sony) static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0); 1994db64fe02SNick Piggin 19950574ecd1SChristoph Hellwig /* 1996f0953a1bSIngo Molnar * Serialize vmap purging. There is no actual critical section protected 1997153090f2SBaoquan He * by this lock, but we want to avoid concurrent calls for performance 19980574ecd1SChristoph Hellwig * reasons and to make the pcpu_get_vm_areas more deterministic. 19990574ecd1SChristoph Hellwig */ 2000f9e09977SChristoph Hellwig static DEFINE_MUTEX(vmap_purge_lock); 20010574ecd1SChristoph Hellwig 200202b709dfSNick Piggin /* for per-CPU blocks */ 200302b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void); 2004282631cbSUladzislau Rezki (Sony) static cpumask_t purge_nodes; 200502b709dfSNick Piggin 200672210662SUladzislau Rezki (Sony) static void 200772210662SUladzislau Rezki (Sony) reclaim_list_global(struct list_head *head) 2008db64fe02SNick Piggin { 200972210662SUladzislau Rezki (Sony) struct vmap_area *va, *n; 2010db64fe02SNick Piggin 201172210662SUladzislau Rezki (Sony) if (list_empty(head)) 201272210662SUladzislau Rezki (Sony) return; 2013db64fe02SNick Piggin 2014e36176beSUladzislau Rezki (Sony) spin_lock(&free_vmap_area_lock); 201572210662SUladzislau Rezki (Sony) list_for_each_entry_safe(va, n, head, list) 201672210662SUladzislau Rezki (Sony) merge_or_add_vmap_area_augment(va, 201772210662SUladzislau Rezki (Sony) &free_vmap_area_root, &free_vmap_area_list); 201872210662SUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 201972210662SUladzislau Rezki (Sony) } 202072210662SUladzislau Rezki (Sony) 202172210662SUladzislau Rezki (Sony) static void 202272210662SUladzislau Rezki (Sony) decay_va_pool_node(struct vmap_node *vn, bool full_decay) 202372210662SUladzislau Rezki (Sony) { 202472210662SUladzislau Rezki (Sony) struct vmap_area *va, *nva; 202572210662SUladzislau Rezki (Sony) struct list_head decay_list; 202672210662SUladzislau Rezki (Sony) struct rb_root decay_root; 202772210662SUladzislau Rezki (Sony) unsigned long n_decay; 202872210662SUladzislau Rezki (Sony) int i; 202972210662SUladzislau Rezki (Sony) 203072210662SUladzislau Rezki (Sony) decay_root = RB_ROOT; 203172210662SUladzislau Rezki (Sony) INIT_LIST_HEAD(&decay_list); 203272210662SUladzislau Rezki (Sony) 203372210662SUladzislau Rezki (Sony) for (i = 0; i < 
MAX_VA_SIZE_PAGES; i++) {
203472210662SUladzislau Rezki (Sony) struct list_head tmp_list;
203572210662SUladzislau Rezki (Sony)
203672210662SUladzislau Rezki (Sony) if (list_empty(&vn->pool[i].head))
203772210662SUladzislau Rezki (Sony) continue;
203872210662SUladzislau Rezki (Sony)
203972210662SUladzislau Rezki (Sony) INIT_LIST_HEAD(&tmp_list);
204072210662SUladzislau Rezki (Sony)
204172210662SUladzislau Rezki (Sony) /* Detach the pool, so no-one can access it. */
204272210662SUladzislau Rezki (Sony) spin_lock(&vn->pool_lock);
204372210662SUladzislau Rezki (Sony) list_replace_init(&vn->pool[i].head, &tmp_list);
204472210662SUladzislau Rezki (Sony) spin_unlock(&vn->pool_lock);
204572210662SUladzislau Rezki (Sony)
204672210662SUladzislau Rezki (Sony) if (full_decay)
204772210662SUladzislau Rezki (Sony) WRITE_ONCE(vn->pool[i].len, 0);
204872210662SUladzislau Rezki (Sony)
204972210662SUladzislau Rezki (Sony) /* Decay the pool by ~25% of the objects left in it. */
205072210662SUladzislau Rezki (Sony) n_decay = vn->pool[i].len >> 2;
205172210662SUladzislau Rezki (Sony)
205272210662SUladzislau Rezki (Sony) list_for_each_entry_safe(va, nva, &tmp_list, list) {
205372210662SUladzislau Rezki (Sony) list_del_init(&va->list);
205472210662SUladzislau Rezki (Sony) merge_or_add_vmap_area(va, &decay_root, &decay_list);
205572210662SUladzislau Rezki (Sony)
205672210662SUladzislau Rezki (Sony) if (!full_decay) {
205772210662SUladzislau Rezki (Sony) WRITE_ONCE(vn->pool[i].len, vn->pool[i].len - 1);
205872210662SUladzislau Rezki (Sony)
205972210662SUladzislau Rezki (Sony) if (!--n_decay)
206072210662SUladzislau Rezki (Sony) break;
206172210662SUladzislau Rezki (Sony) }
206272210662SUladzislau Rezki (Sony) }
206372210662SUladzislau Rezki (Sony)
2064*15e02a39SUladzislau Rezki (Sony) /*
2065*15e02a39SUladzislau Rezki (Sony) * Attach the pool back if it has been partly decayed.
2066*15e02a39SUladzislau Rezki (Sony) * Please note, no other context is supposed to populate
2067*15e02a39SUladzislau Rezki (Sony) * the pool, therefore a simple list replace operation
2068*15e02a39SUladzislau Rezki (Sony) * takes place here.
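*
* As a worked example (made-up pool length): with 8 objects
* left, a regular pass decays n_decay = 8 >> 2, i.e. two
* objects, and re-attaches the remaining six.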
2069*15e02a39SUladzislau Rezki (Sony) */ 207072210662SUladzislau Rezki (Sony) if (!full_decay && !list_empty(&tmp_list)) { 207172210662SUladzislau Rezki (Sony) spin_lock(&vn->pool_lock); 207272210662SUladzislau Rezki (Sony) list_replace_init(&tmp_list, &vn->pool[i].head); 207372210662SUladzislau Rezki (Sony) spin_unlock(&vn->pool_lock); 207472210662SUladzislau Rezki (Sony) } 207572210662SUladzislau Rezki (Sony) } 207672210662SUladzislau Rezki (Sony) 207772210662SUladzislau Rezki (Sony) reclaim_list_global(&decay_list); 207872210662SUladzislau Rezki (Sony) } 207972210662SUladzislau Rezki (Sony) 208072210662SUladzislau Rezki (Sony) static void purge_vmap_node(struct work_struct *work) 208172210662SUladzislau Rezki (Sony) { 208272210662SUladzislau Rezki (Sony) struct vmap_node *vn = container_of(work, 208372210662SUladzislau Rezki (Sony) struct vmap_node, purge_work); 208472210662SUladzislau Rezki (Sony) struct vmap_area *va, *n_va; 208572210662SUladzislau Rezki (Sony) LIST_HEAD(local_list); 208672210662SUladzislau Rezki (Sony) 208772210662SUladzislau Rezki (Sony) vn->nr_purged = 0; 208872210662SUladzislau Rezki (Sony) 2089282631cbSUladzislau Rezki (Sony) list_for_each_entry_safe(va, n_va, &vn->purge_list, list) { 20904d36e6f8SUladzislau Rezki (Sony) unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT; 20913c5c3cfbSDaniel Axtens unsigned long orig_start = va->va_start; 20923c5c3cfbSDaniel Axtens unsigned long orig_end = va->va_end; 209372210662SUladzislau Rezki (Sony) unsigned int vn_id = decode_vn_id(va->flags); 2094763b218dSJoel Fernandes 209572210662SUladzislau Rezki (Sony) list_del_init(&va->list); 20969c801f61SUladzislau Rezki (Sony) 20973c5c3cfbSDaniel Axtens if (is_vmalloc_or_module_addr((void *)orig_start)) 20983c5c3cfbSDaniel Axtens kasan_release_vmalloc(orig_start, orig_end, 20993c5c3cfbSDaniel Axtens va->va_start, va->va_end); 2100dd3b8353SUladzislau Rezki (Sony) 21014d36e6f8SUladzislau Rezki (Sony) atomic_long_sub(nr, &vmap_lazy_nr); 210272210662SUladzislau Rezki (Sony) vn->nr_purged++; 21036030fd5fSUladzislau Rezki (Sony) 210472210662SUladzislau Rezki (Sony) if (is_vn_id_valid(vn_id) && !vn->skip_populate) 210572210662SUladzislau Rezki (Sony) if (node_pool_add_va(vn, va)) 210672210662SUladzislau Rezki (Sony) continue; 210772210662SUladzislau Rezki (Sony) 210872210662SUladzislau Rezki (Sony) /* Go back to global. */ 210972210662SUladzislau Rezki (Sony) list_add(&va->list, &local_list); 211072210662SUladzislau Rezki (Sony) } 211172210662SUladzislau Rezki (Sony) 211272210662SUladzislau Rezki (Sony) reclaim_list_global(&local_list); 2113282631cbSUladzislau Rezki (Sony) } 2114282631cbSUladzislau Rezki (Sony) 2115282631cbSUladzislau Rezki (Sony) /* 2116282631cbSUladzislau Rezki (Sony) * Purges all lazily-freed vmap areas. 
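*
* A sketch of a full drain, exactly as reclaim_and_purge_vmap_areas()
* further below does it (vmap_purge_lock serializes purgers):
*
*	mutex_lock(&vmap_purge_lock);
*	purge_fragmented_blocks_allcpus();
*	__purge_vmap_area_lazy(ULONG_MAX, 0, true);
*	mutex_unlock(&vmap_purge_lock);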
2117282631cbSUladzislau Rezki (Sony) */ 211872210662SUladzislau Rezki (Sony) static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end, 211972210662SUladzislau Rezki (Sony) bool full_pool_decay) 2120282631cbSUladzislau Rezki (Sony) { 212172210662SUladzislau Rezki (Sony) unsigned long nr_purged_areas = 0; 212272210662SUladzislau Rezki (Sony) unsigned int nr_purge_helpers; 212372210662SUladzislau Rezki (Sony) unsigned int nr_purge_nodes; 2124282631cbSUladzislau Rezki (Sony) struct vmap_node *vn; 2125282631cbSUladzislau Rezki (Sony) int i; 2126282631cbSUladzislau Rezki (Sony) 2127282631cbSUladzislau Rezki (Sony) lockdep_assert_held(&vmap_purge_lock); 212872210662SUladzislau Rezki (Sony) 212972210662SUladzislau Rezki (Sony) /* 213072210662SUladzislau Rezki (Sony) * Use a cpumask to mark which nodes have to be processed. 213172210662SUladzislau Rezki (Sony) */ 2132282631cbSUladzislau Rezki (Sony) purge_nodes = CPU_MASK_NONE; 2133282631cbSUladzislau Rezki (Sony) 2134282631cbSUladzislau Rezki (Sony) for (i = 0; i < nr_vmap_nodes; i++) { 2135282631cbSUladzislau Rezki (Sony) vn = &vmap_nodes[i]; 2136282631cbSUladzislau Rezki (Sony) 2137282631cbSUladzislau Rezki (Sony) INIT_LIST_HEAD(&vn->purge_list); 213872210662SUladzislau Rezki (Sony) vn->skip_populate = full_pool_decay; 213972210662SUladzislau Rezki (Sony) decay_va_pool_node(vn, full_pool_decay); 2140282631cbSUladzislau Rezki (Sony) 2141282631cbSUladzislau Rezki (Sony) if (RB_EMPTY_ROOT(&vn->lazy.root)) 2142282631cbSUladzislau Rezki (Sony) continue; 2143282631cbSUladzislau Rezki (Sony) 2144282631cbSUladzislau Rezki (Sony) spin_lock(&vn->lazy.lock); 2145282631cbSUladzislau Rezki (Sony) WRITE_ONCE(vn->lazy.root.rb_node, NULL); 2146282631cbSUladzislau Rezki (Sony) list_replace_init(&vn->lazy.head, &vn->purge_list); 2147282631cbSUladzislau Rezki (Sony) spin_unlock(&vn->lazy.lock); 2148282631cbSUladzislau Rezki (Sony) 2149282631cbSUladzislau Rezki (Sony) start = min(start, list_first_entry(&vn->purge_list, 2150282631cbSUladzislau Rezki (Sony) struct vmap_area, list)->va_start); 2151282631cbSUladzislau Rezki (Sony) 2152282631cbSUladzislau Rezki (Sony) end = max(end, list_last_entry(&vn->purge_list, 2153282631cbSUladzislau Rezki (Sony) struct vmap_area, list)->va_end); 2154282631cbSUladzislau Rezki (Sony) 2155282631cbSUladzislau Rezki (Sony) cpumask_set_cpu(i, &purge_nodes); 2156282631cbSUladzislau Rezki (Sony) } 2157282631cbSUladzislau Rezki (Sony) 215872210662SUladzislau Rezki (Sony) nr_purge_nodes = cpumask_weight(&purge_nodes); 215972210662SUladzislau Rezki (Sony) if (nr_purge_nodes > 0) { 2160282631cbSUladzislau Rezki (Sony) flush_tlb_kernel_range(start, end); 2161282631cbSUladzislau Rezki (Sony) 216272210662SUladzislau Rezki (Sony) /* Schedule one extra helper worker per full lazy_max_pages() set in flight, minus one.
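* E.g. three full sets in flight across eight queued nodes gives
* clamp(3, 1, 8) - 1 = 2 helper kworkers, while the current context
* purges the remaining nodes itself.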
*/ 216372210662SUladzislau Rezki (Sony) nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages(); 216472210662SUladzislau Rezki (Sony) nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1; 216572210662SUladzislau Rezki (Sony) 2166282631cbSUladzislau Rezki (Sony) for_each_cpu(i, &purge_nodes) { 216772210662SUladzislau Rezki (Sony) vn = &vmap_nodes[i]; 216872210662SUladzislau Rezki (Sony) 216972210662SUladzislau Rezki (Sony) if (nr_purge_helpers > 0) { 217072210662SUladzislau Rezki (Sony) INIT_WORK(&vn->purge_work, purge_vmap_node); 217172210662SUladzislau Rezki (Sony) 217272210662SUladzislau Rezki (Sony) if (cpumask_test_cpu(i, cpu_online_mask)) 217372210662SUladzislau Rezki (Sony) schedule_work_on(i, &vn->purge_work); 217472210662SUladzislau Rezki (Sony) else 217572210662SUladzislau Rezki (Sony) schedule_work(&vn->purge_work); 217672210662SUladzislau Rezki (Sony) 217772210662SUladzislau Rezki (Sony) nr_purge_helpers--; 217872210662SUladzislau Rezki (Sony) } else { 217972210662SUladzislau Rezki (Sony) vn->purge_work.func = NULL; 218072210662SUladzislau Rezki (Sony) purge_vmap_node(&vn->purge_work); 218172210662SUladzislau Rezki (Sony) nr_purged_areas += vn->nr_purged; 2182282631cbSUladzislau Rezki (Sony) } 2183282631cbSUladzislau Rezki (Sony) } 2184282631cbSUladzislau Rezki (Sony) 218572210662SUladzislau Rezki (Sony) for_each_cpu(i, &purge_nodes) { 218672210662SUladzislau Rezki (Sony) vn = &vmap_nodes[i]; 218772210662SUladzislau Rezki (Sony) 218872210662SUladzislau Rezki (Sony) if (vn->purge_work.func) { 218972210662SUladzislau Rezki (Sony) flush_work(&vn->purge_work); 219072210662SUladzislau Rezki (Sony) nr_purged_areas += vn->nr_purged; 219172210662SUladzislau Rezki (Sony) } 219272210662SUladzislau Rezki (Sony) } 219372210662SUladzislau Rezki (Sony) } 219472210662SUladzislau Rezki (Sony) 219572210662SUladzislau Rezki (Sony) trace_purge_vmap_area_lazy(start, end, nr_purged_areas); 219672210662SUladzislau Rezki (Sony) return nr_purged_areas > 0; 2197db64fe02SNick Piggin } 2198db64fe02SNick Piggin 2199db64fe02SNick Piggin /* 220077e50af0SThomas Gleixner * Reclaim vmap areas by purging fragmented blocks and the lazily-freed areas. 2201db64fe02SNick Piggin */ 220277e50af0SThomas Gleixner static void reclaim_and_purge_vmap_areas(void) 2204db64fe02SNick Piggin { 2205f9e09977SChristoph Hellwig mutex_lock(&vmap_purge_lock); 22060574ecd1SChristoph Hellwig purge_fragmented_blocks_allcpus(); 220772210662SUladzislau Rezki (Sony) __purge_vmap_area_lazy(ULONG_MAX, 0, true); 2208f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock); 2209db64fe02SNick Piggin } 2210db64fe02SNick Piggin 2211690467c8SUladzislau Rezki (Sony) static void drain_vmap_area_work(struct work_struct *work) 2212690467c8SUladzislau Rezki (Sony) { 2213690467c8SUladzislau Rezki (Sony) mutex_lock(&vmap_purge_lock); 221472210662SUladzislau Rezki (Sony) __purge_vmap_area_lazy(ULONG_MAX, 0, false); 2215690467c8SUladzislau Rezki (Sony) mutex_unlock(&vmap_purge_lock); 2216690467c8SUladzislau Rezki (Sony) } 2217690467c8SUladzislau Rezki (Sony) 2218db64fe02SNick Piggin /* 2219edd89818SUladzislau Rezki (Sony) * Free a vmap area; the caller must ensure that the area has been 2220edd89818SUladzislau Rezki (Sony) * unmapped and unlinked, and that flush_cache_vunmap() has been called 2221edd89818SUladzislau Rezki (Sony) * for the correct range previously.
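*
* The area is queued on the owning node's lazy list and accounted in
* vmap_lazy_nr; e.g. a 64K area with 4K pages adds 16 pages, and once
* the total exceeds lazy_max_pages() the drain work is scheduled.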
2222db64fe02SNick Piggin */ 222364141da5SJeremy Fitzhardinge static void free_vmap_area_noflush(struct vmap_area *va) 2224db64fe02SNick Piggin { 22258c4196feSUladzislau Rezki (Sony) unsigned long nr_lazy_max = lazy_max_pages(); 22268c4196feSUladzislau Rezki (Sony) unsigned long va_start = va->va_start; 222772210662SUladzislau Rezki (Sony) unsigned int vn_id = decode_vn_id(va->flags); 222872210662SUladzislau Rezki (Sony) struct vmap_node *vn; 22294d36e6f8SUladzislau Rezki (Sony) unsigned long nr_lazy; 223080c4bd7aSChris Wilson 2231edd89818SUladzislau Rezki (Sony) if (WARN_ON_ONCE(!list_empty(&va->list))) 2232edd89818SUladzislau Rezki (Sony) return; 2233dd3b8353SUladzislau Rezki (Sony) 22344d36e6f8SUladzislau Rezki (Sony) nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >> 22354d36e6f8SUladzislau Rezki (Sony) PAGE_SHIFT, &vmap_lazy_nr); 223680c4bd7aSChris Wilson 223796e2db45SUladzislau Rezki (Sony) /* 223872210662SUladzislau Rezki (Sony) * If it was requested by a certain node we would like to 223972210662SUladzislau Rezki (Sony) * return it to that node, i.e. its pool for later reuse. 224096e2db45SUladzislau Rezki (Sony) */ 224172210662SUladzislau Rezki (Sony) vn = is_vn_id_valid(vn_id) ? 224272210662SUladzislau Rezki (Sony) id_to_node(vn_id) : addr_to_node(va->va_start); 224372210662SUladzislau Rezki (Sony) 2244282631cbSUladzislau Rezki (Sony) spin_lock(&vn->lazy.lock); 224572210662SUladzislau Rezki (Sony) insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head); 2246282631cbSUladzislau Rezki (Sony) spin_unlock(&vn->lazy.lock); 224780c4bd7aSChris Wilson 22488c4196feSUladzislau Rezki (Sony) trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max); 22498c4196feSUladzislau Rezki (Sony) 225096e2db45SUladzislau Rezki (Sony) /* After this point, we may free va at any time */ 22518c4196feSUladzislau Rezki (Sony) if (unlikely(nr_lazy > nr_lazy_max)) 2252690467c8SUladzislau Rezki (Sony) schedule_work(&drain_vmap_work); 2253db64fe02SNick Piggin } 2254db64fe02SNick Piggin 2255b29acbdcSNick Piggin /* 2256b29acbdcSNick Piggin * Free and unmap a vmap area 2257b29acbdcSNick Piggin */ 2258b29acbdcSNick Piggin static void free_unmap_vmap_area(struct vmap_area *va) 2259b29acbdcSNick Piggin { 2260b29acbdcSNick Piggin flush_cache_vunmap(va->va_start, va->va_end); 22614ad0ae8cSNicholas Piggin vunmap_range_noflush(va->va_start, va->va_end); 22628e57f8acSVlastimil Babka if (debug_pagealloc_enabled_static()) 226382a2e924SChintan Pandya flush_tlb_kernel_range(va->va_start, va->va_end); 226482a2e924SChintan Pandya 2265c8eef01eSChristoph Hellwig free_vmap_area_noflush(va); 2266b29acbdcSNick Piggin } 2267b29acbdcSNick Piggin 2268993d0b28SMatthew Wilcox (Oracle) struct vmap_area *find_vmap_area(unsigned long addr) 2269db64fe02SNick Piggin { 2270d0936029SUladzislau Rezki (Sony) struct vmap_node *vn; 2271db64fe02SNick Piggin struct vmap_area *va; 2272d0936029SUladzislau Rezki (Sony) int i, j; 2273db64fe02SNick Piggin 2274d0936029SUladzislau Rezki (Sony) /* 2275d0936029SUladzislau Rezki (Sony) * An addr_to_node_id(addr) converts an address to a node index 2276d0936029SUladzislau Rezki (Sony) * where a VA is located. If a VA spans several nodes and the passed 2277d0936029SUladzislau Rezki (Sony) * addr is not the same as va->va_start, which is not common, we 2278*15e02a39SUladzislau Rezki (Sony) * may need to scan extra nodes. See an example:
2279d0936029SUladzislau Rezki (Sony) * 2280*15e02a39SUladzislau Rezki (Sony) * <----va----> 2281d0936029SUladzislau Rezki (Sony) * -|-----|-----|-----|-----|- 2282d0936029SUladzislau Rezki (Sony) * 1 2 0 1 2283d0936029SUladzislau Rezki (Sony) * 2284*15e02a39SUladzislau Rezki (Sony) * The VA resides in node 1 whereas it spans nodes 1, 2 and 0. If the 2285*15e02a39SUladzislau Rezki (Sony) * passed addr falls within node 2 or node 0, we must do extra work. 2286d0936029SUladzislau Rezki (Sony) */ 2287d0936029SUladzislau Rezki (Sony) i = j = addr_to_node_id(addr); 2288d0936029SUladzislau Rezki (Sony) do { 2289d0936029SUladzislau Rezki (Sony) vn = &vmap_nodes[i]; 2290db64fe02SNick Piggin 2291d0936029SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock); 2292d0936029SUladzislau Rezki (Sony) va = __find_vmap_area(addr, &vn->busy.root); 2293d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 2294d0936029SUladzislau Rezki (Sony) 2295d0936029SUladzislau Rezki (Sony) if (va) 2296db64fe02SNick Piggin return va; 2297d0936029SUladzislau Rezki (Sony) } while ((i = (i + 1) % nr_vmap_nodes) != j); 2298d0936029SUladzislau Rezki (Sony) 2299d0936029SUladzislau Rezki (Sony) return NULL; 2300db64fe02SNick Piggin } 2301db64fe02SNick Piggin 2302edd89818SUladzislau Rezki (Sony) static struct vmap_area *find_unlink_vmap_area(unsigned long addr) 2303edd89818SUladzislau Rezki (Sony) { 2304d0936029SUladzislau Rezki (Sony) struct vmap_node *vn; 2305edd89818SUladzislau Rezki (Sony) struct vmap_area *va; 2306d0936029SUladzislau Rezki (Sony) int i, j; 2307edd89818SUladzislau Rezki (Sony) 2308*15e02a39SUladzislau Rezki (Sony) /* 2309*15e02a39SUladzislau Rezki (Sony) * See the comment in find_vmap_area() about the loop. 2310*15e02a39SUladzislau Rezki (Sony) */ 2311d0936029SUladzislau Rezki (Sony) i = j = addr_to_node_id(addr); 2312d0936029SUladzislau Rezki (Sony) do { 2313d0936029SUladzislau Rezki (Sony) vn = &vmap_nodes[i]; 2314d0936029SUladzislau Rezki (Sony) 2315d0936029SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock); 2316d0936029SUladzislau Rezki (Sony) va = __find_vmap_area(addr, &vn->busy.root); 2317edd89818SUladzislau Rezki (Sony) if (va) 2318d0936029SUladzislau Rezki (Sony) unlink_va(va, &vn->busy.root); 2319d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 2320edd89818SUladzislau Rezki (Sony) 2321d0936029SUladzislau Rezki (Sony) if (va) 2322edd89818SUladzislau Rezki (Sony) return va; 2323d0936029SUladzislau Rezki (Sony) } while ((i = (i + 1) % nr_vmap_nodes) != j); 2324d0936029SUladzislau Rezki (Sony) 2325d0936029SUladzislau Rezki (Sony) return NULL; 2326edd89818SUladzislau Rezki (Sony) } 2327edd89818SUladzislau Rezki (Sony) 2328db64fe02SNick Piggin /*** Per cpu kva allocator ***/ 2329db64fe02SNick Piggin 2330db64fe02SNick Piggin /* 2331db64fe02SNick Piggin * vmap space is limited especially on 32 bit architectures. Ensure there is 2332db64fe02SNick Piggin * room for at least 16 percpu vmap blocks per CPU. 2333db64fe02SNick Piggin */ 2334db64fe02SNick Piggin /* 2335db64fe02SNick Piggin * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able 2336db64fe02SNick Piggin * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
2337db64fe02SNick Piggin * instead (we just need a rough idea) 2338db64fe02SNick Piggin */ 2339db64fe02SNick Piggin #if BITS_PER_LONG == 32 2340db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024) 2341db64fe02SNick Piggin #else 2342db64fe02SNick Piggin #define VMALLOC_SPACE (128UL*1024*1024*1024) 2343db64fe02SNick Piggin #endif 2344db64fe02SNick Piggin 2345db64fe02SNick Piggin #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) 2346db64fe02SNick Piggin #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ 2347db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ 2348db64fe02SNick Piggin #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) 2349db64fe02SNick Piggin #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ 2350db64fe02SNick Piggin #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ 2351f982f915SClemens Ladisch #define VMAP_BBMAP_BITS \ 2352f982f915SClemens Ladisch VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ 2353db64fe02SNick Piggin VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ 2354f982f915SClemens Ladisch VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) 2355db64fe02SNick Piggin 2356db64fe02SNick Piggin #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) 2357db64fe02SNick Piggin 235877e50af0SThomas Gleixner /* 235977e50af0SThomas Gleixner * Purge threshold to prevent overeager purging of fragmented blocks for 236077e50af0SThomas Gleixner * regular operations: Purge if vb->free is less than 1/4 of the capacity. 236177e50af0SThomas Gleixner */ 236277e50af0SThomas Gleixner #define VMAP_PURGE_THRESHOLD (VMAP_BBMAP_BITS / 4) 236377e50af0SThomas Gleixner 2364869176a0SBaoquan He #define VMAP_RAM 0x1 /* indicates a vm_map_ram area */ 2365869176a0SBaoquan He #define VMAP_BLOCK 0x2 /* marks out the vmap_block sub-type */ 2366869176a0SBaoquan He #define VMAP_FLAGS_MASK 0x3 2367869176a0SBaoquan He 2368db64fe02SNick Piggin struct vmap_block_queue { 2369db64fe02SNick Piggin spinlock_t lock; 2370db64fe02SNick Piggin struct list_head free; 2371062eacf5SUladzislau Rezki (Sony) 2372062eacf5SUladzislau Rezki (Sony) /* 2373062eacf5SUladzislau Rezki (Sony) * An xarray requires extra memory to be allocated 2374062eacf5SUladzislau Rezki (Sony) * dynamically. If that becomes an issue, we can use 2375062eacf5SUladzislau Rezki (Sony) * an rb-tree instead. 2376062eacf5SUladzislau Rezki (Sony) */ 2377062eacf5SUladzislau Rezki (Sony) struct xarray vmap_blocks; 2378db64fe02SNick Piggin }; 2379db64fe02SNick Piggin 2380db64fe02SNick Piggin struct vmap_block { 2381db64fe02SNick Piggin spinlock_t lock; 2382db64fe02SNick Piggin struct vmap_area *va; 2383db64fe02SNick Piggin unsigned long free, dirty; 2384d76f9954SBaoquan He DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS); 23857d61bfe8SRoman Pen unsigned long dirty_min, dirty_max; /*< dirty range */ 2386db64fe02SNick Piggin struct list_head free_list; 2387db64fe02SNick Piggin struct rcu_head rcu_head; 238802b709dfSNick Piggin struct list_head purge; 2389db64fe02SNick Piggin }; 2390db64fe02SNick Piggin 2391db64fe02SNick Piggin /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ 2392db64fe02SNick Piggin static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); 2393db64fe02SNick Piggin 2394db64fe02SNick Piggin /* 2395062eacf5SUladzislau Rezki (Sony) * To get fast access to any "vmap_block" associated with a 2396062eacf5SUladzislau Rezki (Sony) * specific address, we use a hash.
2397062eacf5SUladzislau Rezki (Sony) * 2398062eacf5SUladzislau Rezki (Sony) * A per-cpu vmap_block_queue is used in both ways: to serialize 2399062eacf5SUladzislau Rezki (Sony) * access to the free block chains among CPUs (the alloc path) and 2400062eacf5SUladzislau Rezki (Sony) * to act as a vmap_block hash (alloc/free paths). This means we 2401062eacf5SUladzislau Rezki (Sony) * overload it, since we already have the per-cpu array which is 2402062eacf5SUladzislau Rezki (Sony) * used as a hash table. When used as a hash, a 'cpu' passed to 2403062eacf5SUladzislau Rezki (Sony) * per_cpu() is not actually a CPU but rather a hash index. 2404062eacf5SUladzislau Rezki (Sony) * 2405fa1c77c1SUladzislau Rezki (Sony) * The hash function is addr_to_vb_xa(), which hashes any address 2406062eacf5SUladzislau Rezki (Sony) * to the specific index (in the hash) it belongs to. It then uses 2407062eacf5SUladzislau Rezki (Sony) * the per_cpu() macro to access the array with the generated index. 2408062eacf5SUladzislau Rezki (Sony) * 2409062eacf5SUladzislau Rezki (Sony) * An example: 2410062eacf5SUladzislau Rezki (Sony) * 2411062eacf5SUladzislau Rezki (Sony) * CPU_1 CPU_2 CPU_0 2412062eacf5SUladzislau Rezki (Sony) * | | | 2413062eacf5SUladzislau Rezki (Sony) * V V V 2414062eacf5SUladzislau Rezki (Sony) * 0 10 20 30 40 50 60 2415062eacf5SUladzislau Rezki (Sony) * |------|------|------|------|------|------|...<vmap address space> 2416062eacf5SUladzislau Rezki (Sony) * CPU0 CPU1 CPU2 CPU0 CPU1 CPU2 2417062eacf5SUladzislau Rezki (Sony) * 2418062eacf5SUladzislau Rezki (Sony) * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to the CPU0 zone, thus 2419062eacf5SUladzislau Rezki (Sony) * it accesses: CPU0/INDEX0 -> vmap_blocks -> xa_lock; 2420062eacf5SUladzislau Rezki (Sony) * 2421062eacf5SUladzislau Rezki (Sony) * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to the CPU1 zone, thus 2422062eacf5SUladzislau Rezki (Sony) * it accesses: CPU1/INDEX1 -> vmap_blocks -> xa_lock; 2423062eacf5SUladzislau Rezki (Sony) * 2424062eacf5SUladzislau Rezki (Sony) * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to the CPU2 zone, thus 2425062eacf5SUladzislau Rezki (Sony) * it accesses: CPU2/INDEX2 -> vmap_blocks -> xa_lock. 2426062eacf5SUladzislau Rezki (Sony) * 2427062eacf5SUladzislau Rezki (Sony) * This technique almost always avoids lock contention on insert/remove, 2428062eacf5SUladzislau Rezki (Sony) * however xarray spinlocks protect against any contention that remains. 2429db64fe02SNick Piggin */ 2430062eacf5SUladzislau Rezki (Sony) static struct xarray * 2431fa1c77c1SUladzislau Rezki (Sony) addr_to_vb_xa(unsigned long addr) 2432062eacf5SUladzislau Rezki (Sony) { 2433062eacf5SUladzislau Rezki (Sony) int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus(); 2434062eacf5SUladzislau Rezki (Sony) 2435062eacf5SUladzislau Rezki (Sony) return &per_cpu(vmap_block_queue, index).vmap_blocks; 2436062eacf5SUladzislau Rezki (Sony) } 2437db64fe02SNick Piggin 2438db64fe02SNick Piggin /* 2439db64fe02SNick Piggin * We should probably have a fallback mechanism to allocate virtual memory 2440db64fe02SNick Piggin * out of partially filled vmap blocks. However vmap block sizing should be 2441db64fe02SNick Piggin * fairly reasonable according to the vmalloc size, so it shouldn't be a 2442db64fe02SNick Piggin * big problem.
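*
* E.g. on 64-bit with 4K pages and NR_CPUS == 64: VMALLOC_PAGES / 64 / 16
* is 32768 bits, clamped to VMAP_BBMAP_BITS_MAX == 1024, which makes
* VMAP_BLOCK_SIZE a 4MB block.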
2443db64fe02SNick Piggin */ 2444db64fe02SNick Piggin 2445db64fe02SNick Piggin static unsigned long addr_to_vb_idx(unsigned long addr) 2446db64fe02SNick Piggin { 2447db64fe02SNick Piggin addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); 2448db64fe02SNick Piggin addr /= VMAP_BLOCK_SIZE; 2449db64fe02SNick Piggin return addr; 2450db64fe02SNick Piggin } 2451db64fe02SNick Piggin 2452cf725ce2SRoman Pen static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off) 2453cf725ce2SRoman Pen { 2454cf725ce2SRoman Pen unsigned long addr; 2455cf725ce2SRoman Pen 2456cf725ce2SRoman Pen addr = va_start + (pages_off << PAGE_SHIFT); 2457cf725ce2SRoman Pen BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); 2458cf725ce2SRoman Pen return (void *)addr; 2459cf725ce2SRoman Pen } 2460cf725ce2SRoman Pen 2461cf725ce2SRoman Pen /** 2462cf725ce2SRoman Pen * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this 2463cf725ce2SRoman Pen * block. Of course pages number can't exceed VMAP_BBMAP_BITS 2464cf725ce2SRoman Pen * @order: how many 2^order pages should be occupied in newly allocated block 2465cf725ce2SRoman Pen * @gfp_mask: flags for the page level allocator 2466cf725ce2SRoman Pen * 2467a862f68aSMike Rapoport * Return: virtual address in a newly allocated block or ERR_PTR(-errno) 2468cf725ce2SRoman Pen */ 2469cf725ce2SRoman Pen static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) 2470db64fe02SNick Piggin { 2471db64fe02SNick Piggin struct vmap_block_queue *vbq; 2472db64fe02SNick Piggin struct vmap_block *vb; 2473db64fe02SNick Piggin struct vmap_area *va; 2474062eacf5SUladzislau Rezki (Sony) struct xarray *xa; 2475db64fe02SNick Piggin unsigned long vb_idx; 2476db64fe02SNick Piggin int node, err; 2477cf725ce2SRoman Pen void *vaddr; 2478db64fe02SNick Piggin 2479db64fe02SNick Piggin node = numa_node_id(); 2480db64fe02SNick Piggin 2481db64fe02SNick Piggin vb = kmalloc_node(sizeof(struct vmap_block), 2482db64fe02SNick Piggin gfp_mask & GFP_RECLAIM_MASK, node); 2483db64fe02SNick Piggin if (unlikely(!vb)) 2484db64fe02SNick Piggin return ERR_PTR(-ENOMEM); 2485db64fe02SNick Piggin 2486db64fe02SNick Piggin va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, 2487db64fe02SNick Piggin VMALLOC_START, VMALLOC_END, 2488869176a0SBaoquan He node, gfp_mask, 2489869176a0SBaoquan He VMAP_RAM|VMAP_BLOCK); 2490ddf9c6d4STobias Klauser if (IS_ERR(va)) { 2491db64fe02SNick Piggin kfree(vb); 2492e7d86340SJulia Lawall return ERR_CAST(va); 2493db64fe02SNick Piggin } 2494db64fe02SNick Piggin 2495cf725ce2SRoman Pen vaddr = vmap_block_vaddr(va->va_start, 0); 2496db64fe02SNick Piggin spin_lock_init(&vb->lock); 2497db64fe02SNick Piggin vb->va = va; 2498cf725ce2SRoman Pen /* At least something should be left free */ 2499cf725ce2SRoman Pen BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); 2500d76f9954SBaoquan He bitmap_zero(vb->used_map, VMAP_BBMAP_BITS); 2501cf725ce2SRoman Pen vb->free = VMAP_BBMAP_BITS - (1UL << order); 2502db64fe02SNick Piggin vb->dirty = 0; 25037d61bfe8SRoman Pen vb->dirty_min = VMAP_BBMAP_BITS; 25047d61bfe8SRoman Pen vb->dirty_max = 0; 2505d76f9954SBaoquan He bitmap_set(vb->used_map, 0, (1UL << order)); 2506db64fe02SNick Piggin INIT_LIST_HEAD(&vb->free_list); 2507db64fe02SNick Piggin 2508fa1c77c1SUladzislau Rezki (Sony) xa = addr_to_vb_xa(va->va_start); 2509db64fe02SNick Piggin vb_idx = addr_to_vb_idx(va->va_start); 2510062eacf5SUladzislau Rezki (Sony) err = xa_insert(xa, vb_idx, vb, gfp_mask); 25110f14599cSMatthew Wilcox (Oracle) if (err) { 25120f14599cSMatthew Wilcox (Oracle) 
kfree(vb); 25130f14599cSMatthew Wilcox (Oracle) free_vmap_area(va); 25140f14599cSMatthew Wilcox (Oracle) return ERR_PTR(err); 25150f14599cSMatthew Wilcox (Oracle) } 2516db64fe02SNick Piggin 25173f804920SSebastian Andrzej Siewior vbq = raw_cpu_ptr(&vmap_block_queue); 2518db64fe02SNick Piggin spin_lock(&vbq->lock); 251968ac546fSRoman Pen list_add_tail_rcu(&vb->free_list, &vbq->free); 2520db64fe02SNick Piggin spin_unlock(&vbq->lock); 2521db64fe02SNick Piggin 2522cf725ce2SRoman Pen return vaddr; 2523db64fe02SNick Piggin } 2524db64fe02SNick Piggin 2525db64fe02SNick Piggin static void free_vmap_block(struct vmap_block *vb) 2526db64fe02SNick Piggin { 2527d0936029SUladzislau Rezki (Sony) struct vmap_node *vn; 2528db64fe02SNick Piggin struct vmap_block *tmp; 2529062eacf5SUladzislau Rezki (Sony) struct xarray *xa; 2530db64fe02SNick Piggin 2531fa1c77c1SUladzislau Rezki (Sony) xa = addr_to_vb_xa(vb->va->va_start); 2532062eacf5SUladzislau Rezki (Sony) tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start)); 2533db64fe02SNick Piggin BUG_ON(tmp != vb); 2534db64fe02SNick Piggin 2535d0936029SUladzislau Rezki (Sony) vn = addr_to_node(vb->va->va_start); 2536d0936029SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock); 2537d0936029SUladzislau Rezki (Sony) unlink_va(vb->va, &vn->busy.root); 2538d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 2539edd89818SUladzislau Rezki (Sony) 254064141da5SJeremy Fitzhardinge free_vmap_area_noflush(vb->va); 254122a3c7d1SLai Jiangshan kfree_rcu(vb, rcu_head); 2542db64fe02SNick Piggin } 2543db64fe02SNick Piggin 2544ca5e46c3SThomas Gleixner static bool purge_fragmented_block(struct vmap_block *vb, 254577e50af0SThomas Gleixner struct vmap_block_queue *vbq, struct list_head *purge_list, 254677e50af0SThomas Gleixner bool force_purge) 254702b709dfSNick Piggin { 2548ca5e46c3SThomas Gleixner if (vb->free + vb->dirty != VMAP_BBMAP_BITS || 2549ca5e46c3SThomas Gleixner vb->dirty == VMAP_BBMAP_BITS) 2550ca5e46c3SThomas Gleixner return false; 255102b709dfSNick Piggin 255277e50af0SThomas Gleixner /* Don't overeagerly purge usable blocks unless requested */ 255377e50af0SThomas Gleixner if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD)) 255477e50af0SThomas Gleixner return false; 255577e50af0SThomas Gleixner 2556ca5e46c3SThomas Gleixner /* prevent further allocs after releasing lock */ 25577f48121eSThomas Gleixner WRITE_ONCE(vb->free, 0); 2558ca5e46c3SThomas Gleixner /* prevent purging it again */ 25597f48121eSThomas Gleixner WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS); 25607d61bfe8SRoman Pen vb->dirty_min = 0; 25617d61bfe8SRoman Pen vb->dirty_max = VMAP_BBMAP_BITS; 256202b709dfSNick Piggin spin_lock(&vbq->lock); 256302b709dfSNick Piggin list_del_rcu(&vb->free_list); 256402b709dfSNick Piggin spin_unlock(&vbq->lock); 2565ca5e46c3SThomas Gleixner list_add_tail(&vb->purge, purge_list); 2566ca5e46c3SThomas Gleixner return true; 256702b709dfSNick Piggin } 256802b709dfSNick Piggin 2569ca5e46c3SThomas Gleixner static void free_purged_blocks(struct list_head *purge_list) 2570ca5e46c3SThomas Gleixner { 2571ca5e46c3SThomas Gleixner struct vmap_block *vb, *n_vb; 2572ca5e46c3SThomas Gleixner 2573ca5e46c3SThomas Gleixner list_for_each_entry_safe(vb, n_vb, purge_list, purge) { 257402b709dfSNick Piggin list_del(&vb->purge); 257502b709dfSNick Piggin free_vmap_block(vb); 257602b709dfSNick Piggin } 257702b709dfSNick Piggin } 257802b709dfSNick Piggin 2579ca5e46c3SThomas Gleixner static void purge_fragmented_blocks(int cpu) 2580ca5e46c3SThomas Gleixner { 2581ca5e46c3SThomas Gleixner LIST_HEAD(purge); 
2582ca5e46c3SThomas Gleixner struct vmap_block *vb; 2583ca5e46c3SThomas Gleixner struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 2584ca5e46c3SThomas Gleixner 2585ca5e46c3SThomas Gleixner rcu_read_lock(); 2586ca5e46c3SThomas Gleixner list_for_each_entry_rcu(vb, &vbq->free, free_list) { 25877f48121eSThomas Gleixner unsigned long free = READ_ONCE(vb->free); 25887f48121eSThomas Gleixner unsigned long dirty = READ_ONCE(vb->dirty); 25897f48121eSThomas Gleixner 25907f48121eSThomas Gleixner if (free + dirty != VMAP_BBMAP_BITS || 25917f48121eSThomas Gleixner dirty == VMAP_BBMAP_BITS) 2592ca5e46c3SThomas Gleixner continue; 2593ca5e46c3SThomas Gleixner 2594ca5e46c3SThomas Gleixner spin_lock(&vb->lock); 259577e50af0SThomas Gleixner purge_fragmented_block(vb, vbq, &purge, true); 2596ca5e46c3SThomas Gleixner spin_unlock(&vb->lock); 2597ca5e46c3SThomas Gleixner } 2598ca5e46c3SThomas Gleixner rcu_read_unlock(); 2599ca5e46c3SThomas Gleixner free_purged_blocks(&purge); 2600ca5e46c3SThomas Gleixner } 2601ca5e46c3SThomas Gleixner 260202b709dfSNick Piggin static void purge_fragmented_blocks_allcpus(void) 260302b709dfSNick Piggin { 260402b709dfSNick Piggin int cpu; 260502b709dfSNick Piggin 260602b709dfSNick Piggin for_each_possible_cpu(cpu) 260702b709dfSNick Piggin purge_fragmented_blocks(cpu); 260802b709dfSNick Piggin } 260902b709dfSNick Piggin 2610db64fe02SNick Piggin static void *vb_alloc(unsigned long size, gfp_t gfp_mask) 2611db64fe02SNick Piggin { 2612db64fe02SNick Piggin struct vmap_block_queue *vbq; 2613db64fe02SNick Piggin struct vmap_block *vb; 2614cf725ce2SRoman Pen void *vaddr = NULL; 2615db64fe02SNick Piggin unsigned int order; 2616db64fe02SNick Piggin 2617891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 2618db64fe02SNick Piggin BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 2619aa91c4d8SJan Kara if (WARN_ON(size == 0)) { 2620aa91c4d8SJan Kara /* 2621aa91c4d8SJan Kara * Allocating 0 bytes isn't what caller wants since 2622aa91c4d8SJan Kara * get_order(0) returns funny result. Just warn and terminate 2623aa91c4d8SJan Kara * early. 
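* (The funny result: get_order(0) evaluates to BITS_PER_LONG - PAGE_SHIFT,
* an absurdly large order.)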
2624aa91c4d8SJan Kara */ 2625aa91c4d8SJan Kara return NULL; 2626aa91c4d8SJan Kara } 2627db64fe02SNick Piggin order = get_order(size); 2628db64fe02SNick Piggin 2629db64fe02SNick Piggin rcu_read_lock(); 26303f804920SSebastian Andrzej Siewior vbq = raw_cpu_ptr(&vmap_block_queue); 2631db64fe02SNick Piggin list_for_each_entry_rcu(vb, &vbq->free, free_list) { 2632cf725ce2SRoman Pen unsigned long pages_off; 2633db64fe02SNick Piggin 263443d76502SThomas Gleixner if (READ_ONCE(vb->free) < (1UL << order)) 263543d76502SThomas Gleixner continue; 263643d76502SThomas Gleixner 2637db64fe02SNick Piggin spin_lock(&vb->lock); 2638cf725ce2SRoman Pen if (vb->free < (1UL << order)) { 2639cf725ce2SRoman Pen spin_unlock(&vb->lock); 2640cf725ce2SRoman Pen continue; 2641cf725ce2SRoman Pen } 264202b709dfSNick Piggin 2643cf725ce2SRoman Pen pages_off = VMAP_BBMAP_BITS - vb->free; 2644cf725ce2SRoman Pen vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); 264543d76502SThomas Gleixner WRITE_ONCE(vb->free, vb->free - (1UL << order)); 2646d76f9954SBaoquan He bitmap_set(vb->used_map, pages_off, (1UL << order)); 2647db64fe02SNick Piggin if (vb->free == 0) { 2648db64fe02SNick Piggin spin_lock(&vbq->lock); 2649de560423SNick Piggin list_del_rcu(&vb->free_list); 2650db64fe02SNick Piggin spin_unlock(&vbq->lock); 2651db64fe02SNick Piggin } 2652cf725ce2SRoman Pen 2653db64fe02SNick Piggin spin_unlock(&vb->lock); 2654db64fe02SNick Piggin break; 2655db64fe02SNick Piggin } 265602b709dfSNick Piggin 2657db64fe02SNick Piggin rcu_read_unlock(); 2658db64fe02SNick Piggin 2659cf725ce2SRoman Pen /* Allocate new block if nothing was found */ 2660cf725ce2SRoman Pen if (!vaddr) 2661cf725ce2SRoman Pen vaddr = new_vmap_block(order, gfp_mask); 2662db64fe02SNick Piggin 2663cf725ce2SRoman Pen return vaddr; 2664db64fe02SNick Piggin } 2665db64fe02SNick Piggin 266678a0e8c4SChristoph Hellwig static void vb_free(unsigned long addr, unsigned long size) 2667db64fe02SNick Piggin { 2668db64fe02SNick Piggin unsigned long offset; 2669db64fe02SNick Piggin unsigned int order; 2670db64fe02SNick Piggin struct vmap_block *vb; 2671062eacf5SUladzislau Rezki (Sony) struct xarray *xa; 2672db64fe02SNick Piggin 2673891c49abSAlexander Kuleshov BUG_ON(offset_in_page(size)); 2674db64fe02SNick Piggin BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 2675b29acbdcSNick Piggin 267678a0e8c4SChristoph Hellwig flush_cache_vunmap(addr, addr + size); 2677b29acbdcSNick Piggin 2678db64fe02SNick Piggin order = get_order(size); 267978a0e8c4SChristoph Hellwig offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; 2680062eacf5SUladzislau Rezki (Sony) 2681fa1c77c1SUladzislau Rezki (Sony) xa = addr_to_vb_xa(addr); 2682062eacf5SUladzislau Rezki (Sony) vb = xa_load(xa, addr_to_vb_idx(addr)); 2683062eacf5SUladzislau Rezki (Sony) 2684d76f9954SBaoquan He spin_lock(&vb->lock); 2685d76f9954SBaoquan He bitmap_clear(vb->used_map, offset, (1UL << order)); 2686d76f9954SBaoquan He spin_unlock(&vb->lock); 2687db64fe02SNick Piggin 26884ad0ae8cSNicholas Piggin vunmap_range_noflush(addr, addr + size); 268964141da5SJeremy Fitzhardinge 26908e57f8acSVlastimil Babka if (debug_pagealloc_enabled_static()) 269178a0e8c4SChristoph Hellwig flush_tlb_kernel_range(addr, addr + size); 269282a2e924SChintan Pandya 2693db64fe02SNick Piggin spin_lock(&vb->lock); 26947d61bfe8SRoman Pen 2695a09fad96SThomas Gleixner /* Expand the not yet TLB flushed dirty range */ 26967d61bfe8SRoman Pen vb->dirty_min = min(vb->dirty_min, offset); 26977d61bfe8SRoman Pen vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); 2698d086817dSMinChan 
Kim 26997f48121eSThomas Gleixner WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order)); 2700db64fe02SNick Piggin if (vb->dirty == VMAP_BBMAP_BITS) { 2701de560423SNick Piggin BUG_ON(vb->free); 2702db64fe02SNick Piggin spin_unlock(&vb->lock); 2703db64fe02SNick Piggin free_vmap_block(vb); 2704db64fe02SNick Piggin } else 2705db64fe02SNick Piggin spin_unlock(&vb->lock); 2706db64fe02SNick Piggin } 2707db64fe02SNick Piggin 2708868b104dSRick Edgecombe static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) 2709db64fe02SNick Piggin { 2710ca5e46c3SThomas Gleixner LIST_HEAD(purge_list); 2711db64fe02SNick Piggin int cpu; 2712db64fe02SNick Piggin 27139b463334SJeremy Fitzhardinge if (unlikely(!vmap_initialized)) 27149b463334SJeremy Fitzhardinge return; 27159b463334SJeremy Fitzhardinge 2716ca5e46c3SThomas Gleixner mutex_lock(&vmap_purge_lock); 27175803ed29SChristoph Hellwig 2718db64fe02SNick Piggin for_each_possible_cpu(cpu) { 2719db64fe02SNick Piggin struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 2720db64fe02SNick Piggin struct vmap_block *vb; 2721fc1e0d98SThomas Gleixner unsigned long idx; 2722db64fe02SNick Piggin 2723db64fe02SNick Piggin rcu_read_lock(); 2724fc1e0d98SThomas Gleixner xa_for_each(&vbq->vmap_blocks, idx, vb) { 2725db64fe02SNick Piggin spin_lock(&vb->lock); 2726ca5e46c3SThomas Gleixner 2727ca5e46c3SThomas Gleixner /* 2728ca5e46c3SThomas Gleixner * Try to purge a fragmented block first. If it's 2729ca5e46c3SThomas Gleixner * not purgeable, check whether there is dirty 2730ca5e46c3SThomas Gleixner * space to be flushed. 2731ca5e46c3SThomas Gleixner */ 273277e50af0SThomas Gleixner if (!purge_fragmented_block(vb, vbq, &purge_list, false) && 2733a09fad96SThomas Gleixner vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) { 27347d61bfe8SRoman Pen unsigned long va_start = vb->va->va_start; 2735db64fe02SNick Piggin unsigned long s, e; 2736b136be5eSJoonsoo Kim 27377d61bfe8SRoman Pen s = va_start + (vb->dirty_min << PAGE_SHIFT); 27387d61bfe8SRoman Pen e = va_start + (vb->dirty_max << PAGE_SHIFT); 2739db64fe02SNick Piggin 27407d61bfe8SRoman Pen start = min(s, start); 27417d61bfe8SRoman Pen end = max(e, end); 27427d61bfe8SRoman Pen 2743a09fad96SThomas Gleixner /* Prevent that this is flushed again */ 2744a09fad96SThomas Gleixner vb->dirty_min = VMAP_BBMAP_BITS; 2745a09fad96SThomas Gleixner vb->dirty_max = 0; 2746a09fad96SThomas Gleixner 2747db64fe02SNick Piggin flush = 1; 2748db64fe02SNick Piggin } 2749db64fe02SNick Piggin spin_unlock(&vb->lock); 2750db64fe02SNick Piggin } 2751db64fe02SNick Piggin rcu_read_unlock(); 2752db64fe02SNick Piggin } 2753ca5e46c3SThomas Gleixner free_purged_blocks(&purge_list); 2754db64fe02SNick Piggin 275572210662SUladzislau Rezki (Sony) if (!__purge_vmap_area_lazy(start, end, false) && flush) 27560574ecd1SChristoph Hellwig flush_tlb_kernel_range(start, end); 2757f9e09977SChristoph Hellwig mutex_unlock(&vmap_purge_lock); 2758db64fe02SNick Piggin } 2759868b104dSRick Edgecombe 2760868b104dSRick Edgecombe /** 2761868b104dSRick Edgecombe * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer 2762868b104dSRick Edgecombe * 2763868b104dSRick Edgecombe * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily 2764868b104dSRick Edgecombe * to amortize TLB flushing overheads. 
What this means is that any page you 2765868b104dSRick Edgecombe * have now, may, in a former life, have been mapped into kernel virtual 2766868b104dSRick Edgecombe * address by the vmap layer and so there might be some CPUs with TLB entries 2767868b104dSRick Edgecombe * still referencing that page (additional to the regular 1:1 kernel mapping). 2768868b104dSRick Edgecombe * 2769868b104dSRick Edgecombe * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can 2770868b104dSRick Edgecombe * be sure that none of the pages we have control over will have any aliases 2771868b104dSRick Edgecombe * from the vmap layer. 2772868b104dSRick Edgecombe */ 2773868b104dSRick Edgecombe void vm_unmap_aliases(void) 2774868b104dSRick Edgecombe { 2775868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0; 2776868b104dSRick Edgecombe int flush = 0; 2777868b104dSRick Edgecombe 2778868b104dSRick Edgecombe _vm_unmap_aliases(start, end, flush); 2779868b104dSRick Edgecombe } 2780db64fe02SNick Piggin EXPORT_SYMBOL_GPL(vm_unmap_aliases); 2781db64fe02SNick Piggin 2782db64fe02SNick Piggin /** 2783db64fe02SNick Piggin * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram 2784db64fe02SNick Piggin * @mem: the pointer returned by vm_map_ram 2785db64fe02SNick Piggin * @count: the count passed to that vm_map_ram call (cannot unmap partial) 2786db64fe02SNick Piggin */ 2787db64fe02SNick Piggin void vm_unmap_ram(const void *mem, unsigned int count) 2788db64fe02SNick Piggin { 278965ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT; 27904aff1dc4SAndrey Konovalov unsigned long addr = (unsigned long)kasan_reset_tag(mem); 27919c3acf60SChristoph Hellwig struct vmap_area *va; 2792db64fe02SNick Piggin 27935803ed29SChristoph Hellwig might_sleep(); 2794db64fe02SNick Piggin BUG_ON(!addr); 2795db64fe02SNick Piggin BUG_ON(addr < VMALLOC_START); 2796db64fe02SNick Piggin BUG_ON(addr > VMALLOC_END); 2797a1c0b1a0SShawn Lin BUG_ON(!PAGE_ALIGNED(addr)); 2798db64fe02SNick Piggin 2799d98c9e83SAndrey Ryabinin kasan_poison_vmalloc(mem, size); 2800d98c9e83SAndrey Ryabinin 28019c3acf60SChristoph Hellwig if (likely(count <= VMAP_MAX_ALLOC)) { 280205e3ff95SChintan Pandya debug_check_no_locks_freed(mem, size); 280378a0e8c4SChristoph Hellwig vb_free(addr, size); 28049c3acf60SChristoph Hellwig return; 28059c3acf60SChristoph Hellwig } 28069c3acf60SChristoph Hellwig 2807edd89818SUladzislau Rezki (Sony) va = find_unlink_vmap_area(addr); 280814687619SUladzislau Rezki (Sony) if (WARN_ON_ONCE(!va)) 280914687619SUladzislau Rezki (Sony) return; 281014687619SUladzislau Rezki (Sony) 281105e3ff95SChintan Pandya debug_check_no_locks_freed((void *)va->va_start, 281205e3ff95SChintan Pandya (va->va_end - va->va_start)); 28139c3acf60SChristoph Hellwig free_unmap_vmap_area(va); 2814db64fe02SNick Piggin } 2815db64fe02SNick Piggin EXPORT_SYMBOL(vm_unmap_ram); 2816db64fe02SNick Piggin 2817db64fe02SNick Piggin /** 2818db64fe02SNick Piggin * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) 2819db64fe02SNick Piggin * @pages: an array of pointers to the pages to be mapped 2820db64fe02SNick Piggin * @count: number of pages 2821db64fe02SNick Piggin * @node: prefer to allocate data structures on this node 2822e99c97adSRandy Dunlap * 282336437638SGioh Kim * If you use this function for less than VMAP_MAX_ALLOC pages, it could be 282436437638SGioh Kim * faster than vmap so it's good. 
But if you mix long-life and short-life 282536437638SGioh Kim * objects with vm_map_ram(), it could consume lots of address space through 282636437638SGioh Kim * fragmentation (especially on a 32bit machine). You could see failures in 282736437638SGioh Kim * the end. Please use this function for short-lived objects. 282836437638SGioh Kim * 2829e99c97adSRandy Dunlap * Returns: a pointer to the address that has been mapped, or %NULL on failure 2830db64fe02SNick Piggin */ 2831d4efd79aSChristoph Hellwig void *vm_map_ram(struct page **pages, unsigned int count, int node) 2832db64fe02SNick Piggin { 283365ee03c4SGuillermo Julián Moreno unsigned long size = (unsigned long)count << PAGE_SHIFT; 2834db64fe02SNick Piggin unsigned long addr; 2835db64fe02SNick Piggin void *mem; 2836db64fe02SNick Piggin 2837db64fe02SNick Piggin if (likely(count <= VMAP_MAX_ALLOC)) { 2838db64fe02SNick Piggin mem = vb_alloc(size, GFP_KERNEL); 2839db64fe02SNick Piggin if (IS_ERR(mem)) 2840db64fe02SNick Piggin return NULL; 2841db64fe02SNick Piggin addr = (unsigned long)mem; 2842db64fe02SNick Piggin } else { 2843db64fe02SNick Piggin struct vmap_area *va; 2844db64fe02SNick Piggin va = alloc_vmap_area(size, PAGE_SIZE, 2845869176a0SBaoquan He VMALLOC_START, VMALLOC_END, 2846869176a0SBaoquan He node, GFP_KERNEL, VMAP_RAM); 2847db64fe02SNick Piggin if (IS_ERR(va)) 2848db64fe02SNick Piggin return NULL; 2849db64fe02SNick Piggin 2850db64fe02SNick Piggin addr = va->va_start; 2851db64fe02SNick Piggin mem = (void *)addr; 2852db64fe02SNick Piggin } 2853d98c9e83SAndrey Ryabinin 2854b67177ecSNicholas Piggin if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, 2855b67177ecSNicholas Piggin pages, PAGE_SHIFT) < 0) { 2856db64fe02SNick Piggin vm_unmap_ram(mem, count); 2857db64fe02SNick Piggin return NULL; 2858db64fe02SNick Piggin } 2859b67177ecSNicholas Piggin 286023689e91SAndrey Konovalov /* 286123689e91SAndrey Konovalov * Mark the pages as accessible, now that they are mapped. 286223689e91SAndrey Konovalov * With hardware tag-based KASAN, marking is skipped for 286323689e91SAndrey Konovalov * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 
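* (vm_map_ram() areas never carry VM_ALLOC, hence the explicit
* KASAN_VMALLOC_PROT_NORMAL unpoisoning below.)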
286423689e91SAndrey Konovalov */ 2865f6e39794SAndrey Konovalov mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL); 286619f1c3acSAndrey Konovalov 2867db64fe02SNick Piggin return mem; 2868db64fe02SNick Piggin } 2869db64fe02SNick Piggin EXPORT_SYMBOL(vm_map_ram); 2870db64fe02SNick Piggin 28714341fa45SJoonsoo Kim static struct vm_struct *vmlist __initdata; 287292eac168SMike Rapoport 2873121e6f32SNicholas Piggin static inline unsigned int vm_area_page_order(struct vm_struct *vm) 2874121e6f32SNicholas Piggin { 2875121e6f32SNicholas Piggin #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 2876121e6f32SNicholas Piggin return vm->page_order; 2877121e6f32SNicholas Piggin #else 2878121e6f32SNicholas Piggin return 0; 2879121e6f32SNicholas Piggin #endif 2880121e6f32SNicholas Piggin } 2881121e6f32SNicholas Piggin 2882121e6f32SNicholas Piggin static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order) 2883121e6f32SNicholas Piggin { 2884121e6f32SNicholas Piggin #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 2885121e6f32SNicholas Piggin vm->page_order = order; 2886121e6f32SNicholas Piggin #else 2887121e6f32SNicholas Piggin BUG_ON(order != 0); 2888121e6f32SNicholas Piggin #endif 2889121e6f32SNicholas Piggin } 2890121e6f32SNicholas Piggin 2891f0aa6617STejun Heo /** 2892be9b7335SNicolas Pitre * vm_area_add_early - add vmap area early during boot 2893be9b7335SNicolas Pitre * @vm: vm_struct to add 2894be9b7335SNicolas Pitre * 2895be9b7335SNicolas Pitre * This function is used to add fixed kernel vm area to vmlist before 2896be9b7335SNicolas Pitre * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags 2897be9b7335SNicolas Pitre * should contain proper values and the other fields should be zero. 2898be9b7335SNicolas Pitre * 2899be9b7335SNicolas Pitre * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 2900be9b7335SNicolas Pitre */ 2901be9b7335SNicolas Pitre void __init vm_area_add_early(struct vm_struct *vm) 2902be9b7335SNicolas Pitre { 2903be9b7335SNicolas Pitre struct vm_struct *tmp, **p; 2904be9b7335SNicolas Pitre 2905be9b7335SNicolas Pitre BUG_ON(vmap_initialized); 2906be9b7335SNicolas Pitre for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { 2907be9b7335SNicolas Pitre if (tmp->addr >= vm->addr) { 2908be9b7335SNicolas Pitre BUG_ON(tmp->addr < vm->addr + vm->size); 2909be9b7335SNicolas Pitre break; 2910be9b7335SNicolas Pitre } else 2911be9b7335SNicolas Pitre BUG_ON(tmp->addr + tmp->size > vm->addr); 2912be9b7335SNicolas Pitre } 2913be9b7335SNicolas Pitre vm->next = *p; 2914be9b7335SNicolas Pitre *p = vm; 2915be9b7335SNicolas Pitre } 2916be9b7335SNicolas Pitre 2917be9b7335SNicolas Pitre /** 2918f0aa6617STejun Heo * vm_area_register_early - register vmap area early during boot 2919f0aa6617STejun Heo * @vm: vm_struct to register 2920c0c0a293STejun Heo * @align: requested alignment 2921f0aa6617STejun Heo * 2922f0aa6617STejun Heo * This function is used to register kernel vm area before 2923f0aa6617STejun Heo * vmalloc_init() is called. @vm->size and @vm->flags should contain 2924f0aa6617STejun Heo * proper values on entry and other fields should be zero. On return, 2925f0aa6617STejun Heo * vm->addr contains the allocated address. 2926f0aa6617STejun Heo * 2927f0aa6617STejun Heo * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 
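*
* (The percpu allocator, for example, registers its first-chunk vm area
* this way before vmalloc_init() has run.)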
2928f0aa6617STejun Heo */ 2929c0c0a293STejun Heo void __init vm_area_register_early(struct vm_struct *vm, size_t align) 2930f0aa6617STejun Heo { 29310eb68437SKefeng Wang unsigned long addr = ALIGN(VMALLOC_START, align); 29320eb68437SKefeng Wang struct vm_struct *cur, **p; 2933f0aa6617STejun Heo 29340eb68437SKefeng Wang BUG_ON(vmap_initialized); 2935c0c0a293STejun Heo 29360eb68437SKefeng Wang for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) { 29370eb68437SKefeng Wang if ((unsigned long)cur->addr - addr >= vm->size) 29380eb68437SKefeng Wang break; 29390eb68437SKefeng Wang addr = ALIGN((unsigned long)cur->addr + cur->size, align); 29400eb68437SKefeng Wang } 29410eb68437SKefeng Wang 29420eb68437SKefeng Wang BUG_ON(addr > VMALLOC_END - vm->size); 2943c0c0a293STejun Heo vm->addr = (void *)addr; 29440eb68437SKefeng Wang vm->next = *p; 29450eb68437SKefeng Wang *p = vm; 29463252b1d8SKefeng Wang kasan_populate_early_vm_area_shadow(vm->addr, vm->size); 2947f0aa6617STejun Heo } 2948f0aa6617STejun Heo 2949e36176beSUladzislau Rezki (Sony) static inline void setup_vmalloc_vm_locked(struct vm_struct *vm, 2950e36176beSUladzislau Rezki (Sony) struct vmap_area *va, unsigned long flags, const void *caller) 2951cf88c790STejun Heo { 2952cf88c790STejun Heo vm->flags = flags; 2953cf88c790STejun Heo vm->addr = (void *)va->va_start; 2954cf88c790STejun Heo vm->size = va->va_end - va->va_start; 2955cf88c790STejun Heo vm->caller = caller; 2956db1aecafSMinchan Kim va->vm = vm; 2957e36176beSUladzislau Rezki (Sony) } 2958e36176beSUladzislau Rezki (Sony) 2959e36176beSUladzislau Rezki (Sony) static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, 2960e36176beSUladzislau Rezki (Sony) unsigned long flags, const void *caller) 2961e36176beSUladzislau Rezki (Sony) { 2962d0936029SUladzislau Rezki (Sony) struct vmap_node *vn = addr_to_node(va->va_start); 2963d0936029SUladzislau Rezki (Sony) 2964d0936029SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock); 2965e36176beSUladzislau Rezki (Sony) setup_vmalloc_vm_locked(vm, va, flags, caller); 2966d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 2967f5252e00SMitsuo Hayasaka } 2968cf88c790STejun Heo 296920fc02b4SZhang Yanfei static void clear_vm_uninitialized_flag(struct vm_struct *vm) 2970f5252e00SMitsuo Hayasaka { 2971d4033afdSJoonsoo Kim /* 297220fc02b4SZhang Yanfei * Before removing VM_UNINITIALIZED, 2973d4033afdSJoonsoo Kim * we should make sure that vm has proper values. 2974d4033afdSJoonsoo Kim * Pair with smp_rmb() in show_numa_info(). 
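* The smp_wmb() orders the preceding vm field initialization against
* the flag clear: a reader that sees VM_UNINITIALIZED cleared is also
* guaranteed to see valid vm contents.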
2975d4033afdSJoonsoo Kim */ 2976d4033afdSJoonsoo Kim smp_wmb(); 297720fc02b4SZhang Yanfei vm->flags &= ~VM_UNINITIALIZED; 2978cf88c790STejun Heo } 2979cf88c790STejun Heo 2980db64fe02SNick Piggin static struct vm_struct *__get_vm_area_node(unsigned long size, 29817ca3027bSDaniel Axtens unsigned long align, unsigned long shift, unsigned long flags, 29827ca3027bSDaniel Axtens unsigned long start, unsigned long end, int node, 29837ca3027bSDaniel Axtens gfp_t gfp_mask, const void *caller) 2984db64fe02SNick Piggin { 29850006526dSKautuk Consul struct vmap_area *va; 2986db64fe02SNick Piggin struct vm_struct *area; 2987d98c9e83SAndrey Ryabinin unsigned long requested_size = size; 29881da177e4SLinus Torvalds 298952fd24caSGiridhar Pemmasani BUG_ON(in_interrupt()); 29907ca3027bSDaniel Axtens size = ALIGN(size, 1ul << shift); 299131be8309SOGAWA Hirofumi if (unlikely(!size)) 299231be8309SOGAWA Hirofumi return NULL; 29931da177e4SLinus Torvalds 2994252e5c6eSzijun_hu if (flags & VM_IOREMAP) 2995252e5c6eSzijun_hu align = 1ul << clamp_t(int, get_count_order_long(size), 2996252e5c6eSzijun_hu PAGE_SHIFT, IOREMAP_MAX_ORDER); 2997252e5c6eSzijun_hu 2998cf88c790STejun Heo area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); 29991da177e4SLinus Torvalds if (unlikely(!area)) 30001da177e4SLinus Torvalds return NULL; 30011da177e4SLinus Torvalds 300271394fe5SAndrey Ryabinin if (!(flags & VM_NO_GUARD)) 30031da177e4SLinus Torvalds size += PAGE_SIZE; 30041da177e4SLinus Torvalds 3005869176a0SBaoquan He va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0); 3006db64fe02SNick Piggin if (IS_ERR(va)) { 3007db64fe02SNick Piggin kfree(area); 3008db64fe02SNick Piggin return NULL; 30091da177e4SLinus Torvalds } 30101da177e4SLinus Torvalds 3011d98c9e83SAndrey Ryabinin setup_vmalloc_vm(area, va, flags, caller); 30123c5c3cfbSDaniel Axtens 301319f1c3acSAndrey Konovalov /* 301419f1c3acSAndrey Konovalov * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a 301519f1c3acSAndrey Konovalov * best-effort approach, as they can be mapped outside of vmalloc code. 301619f1c3acSAndrey Konovalov * For VM_ALLOC mappings, the pages are marked as accessible after 301719f1c3acSAndrey Konovalov * getting mapped in __vmalloc_node_range(). 301823689e91SAndrey Konovalov * With hardware tag-based KASAN, marking is skipped for 301923689e91SAndrey Konovalov * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 
302019f1c3acSAndrey Konovalov */ 302119f1c3acSAndrey Konovalov if (!(flags & VM_ALLOC)) 302223689e91SAndrey Konovalov area->addr = kasan_unpoison_vmalloc(area->addr, requested_size, 3023f6e39794SAndrey Konovalov KASAN_VMALLOC_PROT_NORMAL); 30241d96320fSAndrey Konovalov 30251da177e4SLinus Torvalds return area; 30261da177e4SLinus Torvalds } 30271da177e4SLinus Torvalds 3028c2968612SBenjamin Herrenschmidt struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, 3029c2968612SBenjamin Herrenschmidt unsigned long start, unsigned long end, 30305e6cafc8SMarek Szyprowski const void *caller) 3031c2968612SBenjamin Herrenschmidt { 30327ca3027bSDaniel Axtens return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end, 30337ca3027bSDaniel Axtens NUMA_NO_NODE, GFP_KERNEL, caller); 3034c2968612SBenjamin Herrenschmidt } 3035c2968612SBenjamin Herrenschmidt 30361da177e4SLinus Torvalds /** 3037183ff22bSSimon Arlott * get_vm_area - reserve a contiguous kernel virtual area 30381da177e4SLinus Torvalds * @size: size of the area 30391da177e4SLinus Torvalds * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC 30401da177e4SLinus Torvalds * 30411da177e4SLinus Torvalds * Search for an area of @size in the kernel virtual mapping area, 30421da177e4SLinus Torvalds * and reserve it for our purposes. Returns the area descriptor 30431da177e4SLinus Torvalds * on success or %NULL on failure. 3044a862f68aSMike Rapoport * 3045a862f68aSMike Rapoport * Return: the area descriptor on success or %NULL on failure. 30461da177e4SLinus Torvalds */ 30471da177e4SLinus Torvalds struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) 30481da177e4SLinus Torvalds { 30497ca3027bSDaniel Axtens return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, 30507ca3027bSDaniel Axtens VMALLOC_START, VMALLOC_END, 305100ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL, 305200ef2d2fSDavid Rientjes __builtin_return_address(0)); 305323016969SChristoph Lameter } 305423016969SChristoph Lameter 305523016969SChristoph Lameter struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, 30565e6cafc8SMarek Szyprowski const void *caller) 305723016969SChristoph Lameter { 30587ca3027bSDaniel Axtens return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, 30597ca3027bSDaniel Axtens VMALLOC_START, VMALLOC_END, 306000ef2d2fSDavid Rientjes NUMA_NO_NODE, GFP_KERNEL, caller); 30611da177e4SLinus Torvalds } 30621da177e4SLinus Torvalds 3063e9da6e99SMarek Szyprowski /** 3064e9da6e99SMarek Szyprowski * find_vm_area - find a contiguous kernel virtual area 3065e9da6e99SMarek Szyprowski * @addr: base address 3066e9da6e99SMarek Szyprowski * 3067e9da6e99SMarek Szyprowski * Search for the kernel VM area starting at @addr, and return it. 3068e9da6e99SMarek Szyprowski * It is up to the caller to do all required locking to keep the returned 3069e9da6e99SMarek Szyprowski * pointer valid. 3070a862f68aSMike Rapoport * 307174640617SHui Su * Return: the area descriptor on success or %NULL on failure.
3072e9da6e99SMarek Szyprowski */ 3073e9da6e99SMarek Szyprowski struct vm_struct *find_vm_area(const void *addr) 307483342314SNick Piggin { 3075db64fe02SNick Piggin struct vmap_area *va; 307683342314SNick Piggin 3077db64fe02SNick Piggin va = find_vmap_area((unsigned long)addr); 3078688fcbfcSPengfei Li if (!va) 30797856dfebSAndi Kleen return NULL; 3080688fcbfcSPengfei Li 3081688fcbfcSPengfei Li return va->vm; 30827856dfebSAndi Kleen } 30837856dfebSAndi Kleen 30841da177e4SLinus Torvalds /** 3085183ff22bSSimon Arlott * remove_vm_area - find and remove a continuous kernel virtual area 30861da177e4SLinus Torvalds * @addr: base address 30871da177e4SLinus Torvalds * 30881da177e4SLinus Torvalds * Search for the kernel VM area starting at @addr, and remove it. 30891da177e4SLinus Torvalds * This function returns the found VM area, but using it is NOT safe 30907856dfebSAndi Kleen * on SMP machines, except for its size or flags. 3091a862f68aSMike Rapoport * 309274640617SHui Su * Return: the area descriptor on success or %NULL on failure. 30931da177e4SLinus Torvalds */ 3094b3bdda02SChristoph Lameter struct vm_struct *remove_vm_area(const void *addr) 30951da177e4SLinus Torvalds { 3096db64fe02SNick Piggin struct vmap_area *va; 309775c59ce7SChristoph Hellwig struct vm_struct *vm; 3098db64fe02SNick Piggin 30995803ed29SChristoph Hellwig might_sleep(); 31005803ed29SChristoph Hellwig 310117d3ef43SChristoph Hellwig if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", 310217d3ef43SChristoph Hellwig addr)) 3103db64fe02SNick Piggin return NULL; 310417d3ef43SChristoph Hellwig 310575c59ce7SChristoph Hellwig va = find_unlink_vmap_area((unsigned long)addr); 310675c59ce7SChristoph Hellwig if (!va || !va->vm) 310775c59ce7SChristoph Hellwig return NULL; 310875c59ce7SChristoph Hellwig vm = va->vm; 310917d3ef43SChristoph Hellwig 311017d3ef43SChristoph Hellwig debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm)); 311117d3ef43SChristoph Hellwig debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm)); 311275c59ce7SChristoph Hellwig kasan_free_module_shadow(vm); 311317d3ef43SChristoph Hellwig kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm)); 311417d3ef43SChristoph Hellwig 311575c59ce7SChristoph Hellwig free_unmap_vmap_area(va); 311675c59ce7SChristoph Hellwig return vm; 31171da177e4SLinus Torvalds } 31181da177e4SLinus Torvalds 3119868b104dSRick Edgecombe static inline void set_area_direct_map(const struct vm_struct *area, 3120868b104dSRick Edgecombe int (*set_direct_map)(struct page *page)) 3121868b104dSRick Edgecombe { 3122868b104dSRick Edgecombe int i; 3123868b104dSRick Edgecombe 3124121e6f32SNicholas Piggin /* HUGE_VMALLOC passes small pages to set_direct_map */ 3125868b104dSRick Edgecombe for (i = 0; i < area->nr_pages; i++) 3126868b104dSRick Edgecombe if (page_address(area->pages[i])) 3127868b104dSRick Edgecombe set_direct_map(area->pages[i]); 3128868b104dSRick Edgecombe } 3129868b104dSRick Edgecombe 31309e5fa0aeSChristoph Hellwig /* 31319e5fa0aeSChristoph Hellwig * Flush the vm mapping and reset the direct map. 
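* (Used for VM_FLUSH_RESET_PERMS areas, e.g. executable allocations,
* so that no stale writable or executable aliases survive in the
* direct map.)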
31329e5fa0aeSChristoph Hellwig */ 31339e5fa0aeSChristoph Hellwig static void vm_reset_perms(struct vm_struct *area) 3134868b104dSRick Edgecombe { 3135868b104dSRick Edgecombe unsigned long start = ULONG_MAX, end = 0; 3136121e6f32SNicholas Piggin unsigned int page_order = vm_area_page_order(area); 313731e67340SRick Edgecombe int flush_dmap = 0; 3138868b104dSRick Edgecombe int i; 3139868b104dSRick Edgecombe 3140868b104dSRick Edgecombe /* 31419e5fa0aeSChristoph Hellwig * Find the start and end range of the direct mappings to make sure that 3142868b104dSRick Edgecombe * the vm_unmap_aliases() flush includes the direct map. 3143868b104dSRick Edgecombe */ 3144121e6f32SNicholas Piggin for (i = 0; i < area->nr_pages; i += 1U << page_order) { 31458e41f872SRick Edgecombe unsigned long addr = (unsigned long)page_address(area->pages[i]); 31469e5fa0aeSChristoph Hellwig 31478e41f872SRick Edgecombe if (addr) { 3148121e6f32SNicholas Piggin unsigned long page_size; 3149121e6f32SNicholas Piggin 3150121e6f32SNicholas Piggin page_size = PAGE_SIZE << page_order; 3151868b104dSRick Edgecombe start = min(addr, start); 3152121e6f32SNicholas Piggin end = max(addr + page_size, end); 315331e67340SRick Edgecombe flush_dmap = 1; 3154868b104dSRick Edgecombe } 3155868b104dSRick Edgecombe } 3156868b104dSRick Edgecombe 3157868b104dSRick Edgecombe /* 3158868b104dSRick Edgecombe * Set direct map to something invalid so that it won't be cached if 3159868b104dSRick Edgecombe * there are any accesses after the TLB flush, then flush the TLB and 3160868b104dSRick Edgecombe * reset the direct map permissions to the default. 3161868b104dSRick Edgecombe */ 3162868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_invalid_noflush); 316331e67340SRick Edgecombe _vm_unmap_aliases(start, end, flush_dmap); 3164868b104dSRick Edgecombe set_area_direct_map(area, set_direct_map_default_noflush); 3165868b104dSRick Edgecombe } 3166868b104dSRick Edgecombe 3167208162f4SChristoph Hellwig static void delayed_vfree_work(struct work_struct *w) 31681da177e4SLinus Torvalds { 3169208162f4SChristoph Hellwig struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq); 3170208162f4SChristoph Hellwig struct llist_node *t, *llnode; 31711da177e4SLinus Torvalds 3172208162f4SChristoph Hellwig llist_for_each_safe(llnode, t, llist_del_all(&p->list)) 31735d3d31d6SChristoph Hellwig vfree(llnode); 3174bf22e37aSAndrey Ryabinin } 3175bf22e37aSAndrey Ryabinin 3176bf22e37aSAndrey Ryabinin /** 3177bf22e37aSAndrey Ryabinin * vfree_atomic - release memory allocated by vmalloc() 3178bf22e37aSAndrey Ryabinin * @addr: memory base address 3179bf22e37aSAndrey Ryabinin * 3180bf22e37aSAndrey Ryabinin * This one is just like vfree() but can be called in any atomic context 3181bf22e37aSAndrey Ryabinin * except NMIs. 3182bf22e37aSAndrey Ryabinin */ 3183bf22e37aSAndrey Ryabinin void vfree_atomic(const void *addr) 3184bf22e37aSAndrey Ryabinin { 318501e2e839SChristoph Hellwig struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred); 3186bf22e37aSAndrey Ryabinin 318701e2e839SChristoph Hellwig BUG_ON(in_nmi()); 3188bf22e37aSAndrey Ryabinin kmemleak_free(addr); 3189bf22e37aSAndrey Ryabinin 319001e2e839SChristoph Hellwig /* 319101e2e839SChristoph Hellwig * Use raw_cpu_ptr() because this can be called from preemptible 319201e2e839SChristoph Hellwig * context. Preemption is absolutely fine here, because the llist_add() 319301e2e839SChristoph Hellwig * implementation is lockless, so it works even if we are adding to 319401e2e839SChristoph Hellwig * another cpu's list. 
schedule_work() should be fine with this too. 319501e2e839SChristoph Hellwig */ 319601e2e839SChristoph Hellwig if (addr && llist_add((struct llist_node *)addr, &p->list)) 319701e2e839SChristoph Hellwig schedule_work(&p->wq); 3198c67dc624SRoman Penyaev } 3199c67dc624SRoman Penyaev 32001da177e4SLinus Torvalds /** 3201fa307474SMatthew Wilcox (Oracle) * vfree - Release memory allocated by vmalloc() 3202fa307474SMatthew Wilcox (Oracle) * @addr: Memory base address 32031da177e4SLinus Torvalds * 3204fa307474SMatthew Wilcox (Oracle) * Free the virtually contiguous memory area starting at @addr, as obtained 3205fa307474SMatthew Wilcox (Oracle) * from one of the vmalloc() family of APIs. This will usually also free the 3206fa307474SMatthew Wilcox (Oracle) * physical memory underlying the virtual allocation, but that memory is 3207fa307474SMatthew Wilcox (Oracle) * reference counted, so it will not be freed until the last user goes away. 32081da177e4SLinus Torvalds * 3209fa307474SMatthew Wilcox (Oracle) * If @addr is NULL, no operation is performed. 321032fcfd40SAl Viro * 3211fa307474SMatthew Wilcox (Oracle) * Context: 32123ca4ea3aSAndrey Ryabinin * May sleep if called *not* from interrupt context. 3213fa307474SMatthew Wilcox (Oracle) * Must not be called in NMI context (strictly speaking, it could be 3214fa307474SMatthew Wilcox (Oracle) * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling 3215f0953a1bSIngo Molnar * conventions for vfree() arch-dependent would be a really bad idea). 32161da177e4SLinus Torvalds */ 3217b3bdda02SChristoph Lameter void vfree(const void *addr) 32181da177e4SLinus Torvalds { 321979311c1fSChristoph Hellwig struct vm_struct *vm; 322079311c1fSChristoph Hellwig int i; 322179311c1fSChristoph Hellwig 322201e2e839SChristoph Hellwig if (unlikely(in_interrupt())) { 322301e2e839SChristoph Hellwig vfree_atomic(addr); 322432fcfd40SAl Viro return; 322501e2e839SChristoph Hellwig } 322601e2e839SChristoph Hellwig 32271da177e4SLinus Torvalds BUG_ON(in_nmi()); 322889219d37SCatalin Marinas kmemleak_free(addr); 322901e2e839SChristoph Hellwig might_sleep(); 323032fcfd40SAl Viro 3231bf22e37aSAndrey Ryabinin if (!addr) 3232bf22e37aSAndrey Ryabinin return; 3233c67dc624SRoman Penyaev 323479311c1fSChristoph Hellwig vm = remove_vm_area(addr); 323579311c1fSChristoph Hellwig if (unlikely(!vm)) { 323679311c1fSChristoph Hellwig WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 323779311c1fSChristoph Hellwig addr); 323879311c1fSChristoph Hellwig return; 323979311c1fSChristoph Hellwig } 324079311c1fSChristoph Hellwig 32419e5fa0aeSChristoph Hellwig if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS)) 32429e5fa0aeSChristoph Hellwig vm_reset_perms(vm); 324379311c1fSChristoph Hellwig for (i = 0; i < vm->nr_pages; i++) { 324479311c1fSChristoph Hellwig struct page *page = vm->pages[i]; 324579311c1fSChristoph Hellwig 324679311c1fSChristoph Hellwig BUG_ON(!page); 324779311c1fSChristoph Hellwig mod_memcg_page_state(page, MEMCG_VMALLOC, -1); 324879311c1fSChristoph Hellwig /* 324979311c1fSChristoph Hellwig * High-order allocs for huge vmallocs are split, so 325079311c1fSChristoph Hellwig * can be freed as an array of order-0 allocations 325179311c1fSChristoph Hellwig */ 3252dcc1be11SLorenzo Stoakes __free_page(page); 325379311c1fSChristoph Hellwig cond_resched(); 325479311c1fSChristoph Hellwig } 325579311c1fSChristoph Hellwig atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages); 325679311c1fSChristoph Hellwig kvfree(vm->pages); 325779311c1fSChristoph Hellwig kfree(vm); 32581da177e4SLinus Torvalds } 32591da177e4SLinus Torvalds EXPORT_SYMBOL(vfree); 32601da177e4SLinus Torvalds 32611da177e4SLinus Torvalds /** 32621da177e4SLinus Torvalds * vunmap - release virtual mapping obtained by vmap() 32631da177e4SLinus Torvalds * @addr: memory base address 32641da177e4SLinus Torvalds * 32651da177e4SLinus Torvalds * Free the virtually contiguous memory area starting at @addr, 32661da177e4SLinus Torvalds * which was created from the page array passed to vmap(). 32671da177e4SLinus Torvalds * 326880e93effSPekka Enberg * Must not be called in interrupt context. 32691da177e4SLinus Torvalds */ 3270b3bdda02SChristoph Lameter void vunmap(const void *addr) 32711da177e4SLinus Torvalds { 327279311c1fSChristoph Hellwig struct vm_struct *vm; 327379311c1fSChristoph Hellwig 32741da177e4SLinus Torvalds BUG_ON(in_interrupt()); 327534754b69SPeter Zijlstra might_sleep(); 327679311c1fSChristoph Hellwig 327779311c1fSChristoph Hellwig if (!addr) 327879311c1fSChristoph Hellwig return; 327979311c1fSChristoph Hellwig vm = remove_vm_area(addr); 328079311c1fSChristoph Hellwig if (unlikely(!vm)) { 328179311c1fSChristoph Hellwig WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n", 328279311c1fSChristoph Hellwig addr); 328379311c1fSChristoph Hellwig return; 328479311c1fSChristoph Hellwig } 328579311c1fSChristoph Hellwig kfree(vm); 32861da177e4SLinus Torvalds } 32871da177e4SLinus Torvalds EXPORT_SYMBOL(vunmap); 32881da177e4SLinus Torvalds 32891da177e4SLinus Torvalds /** 32901da177e4SLinus Torvalds * vmap - map an array of pages into virtually contiguous space 32911da177e4SLinus Torvalds * @pages: array of page pointers 32921da177e4SLinus Torvalds * @count: number of pages to map 32931da177e4SLinus Torvalds * @flags: vm_area->flags 32941da177e4SLinus Torvalds * @prot: page protection for the mapping 32951da177e4SLinus Torvalds * 3296b944afc9SChristoph Hellwig * Maps @count pages from @pages into contiguous kernel virtual space. 3297b944afc9SChristoph Hellwig * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself 3298b944afc9SChristoph Hellwig * (which must be kmalloc or vmalloc memory) and one reference per page in it 3299b944afc9SChristoph Hellwig * are transferred from the caller to vmap(), and will be freed / dropped when 3300b944afc9SChristoph Hellwig * vfree() is called on the return value. 3301a862f68aSMike Rapoport * 3302a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure 33031da177e4SLinus Torvalds */ 33041da177e4SLinus Torvalds void *vmap(struct page **pages, unsigned int count, 33051da177e4SLinus Torvalds unsigned long flags, pgprot_t prot) 33061da177e4SLinus Torvalds { 33071da177e4SLinus Torvalds struct vm_struct *area; 3308b67177ecSNicholas Piggin unsigned long addr; 330965ee03c4SGuillermo Julián Moreno unsigned long size; /* In bytes */ 33101da177e4SLinus Torvalds 331134754b69SPeter Zijlstra might_sleep(); 331234754b69SPeter Zijlstra 331337f3605eSChristoph Hellwig if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS)) 331437f3605eSChristoph Hellwig return NULL; 331537f3605eSChristoph Hellwig 3316bd1a8fb2SPeter Zijlstra /* 3317bd1a8fb2SPeter Zijlstra * Your top guard is someone else's bottom guard. Not having a top 3318bd1a8fb2SPeter Zijlstra * guard compromises someone else's mappings too.
3319bd1a8fb2SPeter Zijlstra */ 3320bd1a8fb2SPeter Zijlstra if (WARN_ON_ONCE(flags & VM_NO_GUARD)) 3321bd1a8fb2SPeter Zijlstra flags &= ~VM_NO_GUARD; 3322bd1a8fb2SPeter Zijlstra 3323ca79b0c2SArun KS if (count > totalram_pages()) 33241da177e4SLinus Torvalds return NULL; 33251da177e4SLinus Torvalds 332665ee03c4SGuillermo Julián Moreno size = (unsigned long)count << PAGE_SHIFT; 332765ee03c4SGuillermo Julián Moreno area = get_vm_area_caller(size, flags, __builtin_return_address(0)); 33281da177e4SLinus Torvalds if (!area) 33291da177e4SLinus Torvalds return NULL; 333023016969SChristoph Lameter 3331b67177ecSNicholas Piggin addr = (unsigned long)area->addr; 3332b67177ecSNicholas Piggin if (vmap_pages_range(addr, addr + size, pgprot_nx(prot), 3333b67177ecSNicholas Piggin pages, PAGE_SHIFT) < 0) { 33341da177e4SLinus Torvalds vunmap(area->addr); 33351da177e4SLinus Torvalds return NULL; 33361da177e4SLinus Torvalds } 33371da177e4SLinus Torvalds 3338c22ee528SMiaohe Lin if (flags & VM_MAP_PUT_PAGES) { 3339b944afc9SChristoph Hellwig area->pages = pages; 3340c22ee528SMiaohe Lin area->nr_pages = count; 3341c22ee528SMiaohe Lin } 33421da177e4SLinus Torvalds return area->addr; 33431da177e4SLinus Torvalds } 33441da177e4SLinus Torvalds EXPORT_SYMBOL(vmap); 33451da177e4SLinus Torvalds 33463e9a9e25SChristoph Hellwig #ifdef CONFIG_VMAP_PFN 33473e9a9e25SChristoph Hellwig struct vmap_pfn_data { 33483e9a9e25SChristoph Hellwig unsigned long *pfns; 33493e9a9e25SChristoph Hellwig pgprot_t prot; 33503e9a9e25SChristoph Hellwig unsigned int idx; 33513e9a9e25SChristoph Hellwig }; 33523e9a9e25SChristoph Hellwig 33533e9a9e25SChristoph Hellwig static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private) 33543e9a9e25SChristoph Hellwig { 33553e9a9e25SChristoph Hellwig struct vmap_pfn_data *data = private; 3356b3f78e74SRyan Roberts unsigned long pfn = data->pfns[data->idx]; 3357b3f78e74SRyan Roberts pte_t ptent; 33583e9a9e25SChristoph Hellwig 3359b3f78e74SRyan Roberts if (WARN_ON_ONCE(pfn_valid(pfn))) 33603e9a9e25SChristoph Hellwig return -EINVAL; 3361b3f78e74SRyan Roberts 3362b3f78e74SRyan Roberts ptent = pte_mkspecial(pfn_pte(pfn, data->prot)); 3363b3f78e74SRyan Roberts set_pte_at(&init_mm, addr, pte, ptent); 3364b3f78e74SRyan Roberts 3365b3f78e74SRyan Roberts data->idx++; 33663e9a9e25SChristoph Hellwig return 0; 33673e9a9e25SChristoph Hellwig } 33683e9a9e25SChristoph Hellwig 33693e9a9e25SChristoph Hellwig /** 33703e9a9e25SChristoph Hellwig * vmap_pfn - map an array of PFNs into virtually contiguous space 33713e9a9e25SChristoph Hellwig * @pfns: array of PFNs 33723e9a9e25SChristoph Hellwig * @count: number of pages to map 33733e9a9e25SChristoph Hellwig * @prot: page protection for the mapping 33743e9a9e25SChristoph Hellwig * 33753e9a9e25SChristoph Hellwig * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns 33763e9a9e25SChristoph Hellwig * the start address of the mapping. 
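 *
 * Editor's sketch (hypothetical driver usage; the PFNs would typically
 * refer to device memory without struct pages, error handling omitted):
 *
 *	unsigned long pfns[2] = { pfn0, pfn1 };	(illustrative values)
 *	void *va = vmap_pfn(pfns, 2, pgprot_noncached(PAGE_KERNEL));
 *
 *	if (va) {
 *		... use the two mapped pages at va ...
 *		vunmap(va);
 *	}
 *
 * Note that vmap_pfn() refuses PFNs that have a struct page (see the
 * pfn_valid() check above); such pages should be mapped with vmap().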
33773e9a9e25SChristoph Hellwig */ 33783e9a9e25SChristoph Hellwig void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot) 33793e9a9e25SChristoph Hellwig { 33803e9a9e25SChristoph Hellwig struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) }; 33813e9a9e25SChristoph Hellwig struct vm_struct *area; 33823e9a9e25SChristoph Hellwig 33833e9a9e25SChristoph Hellwig area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, 33843e9a9e25SChristoph Hellwig __builtin_return_address(0)); 33853e9a9e25SChristoph Hellwig if (!area) 33863e9a9e25SChristoph Hellwig return NULL; 33873e9a9e25SChristoph Hellwig if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 33883e9a9e25SChristoph Hellwig count * PAGE_SIZE, vmap_pfn_apply, &data)) { 33893e9a9e25SChristoph Hellwig free_vm_area(area); 33903e9a9e25SChristoph Hellwig return NULL; 33913e9a9e25SChristoph Hellwig } 3392a50420c7SAlexandre Ghiti 3393a50420c7SAlexandre Ghiti flush_cache_vmap((unsigned long)area->addr, 3394a50420c7SAlexandre Ghiti (unsigned long)area->addr + count * PAGE_SIZE); 3395a50420c7SAlexandre Ghiti 33963e9a9e25SChristoph Hellwig return area->addr; 33973e9a9e25SChristoph Hellwig } 33983e9a9e25SChristoph Hellwig EXPORT_SYMBOL_GPL(vmap_pfn); 33993e9a9e25SChristoph Hellwig #endif /* CONFIG_VMAP_PFN */ 34003e9a9e25SChristoph Hellwig 340112b9f873SUladzislau Rezki static inline unsigned int 340212b9f873SUladzislau Rezki vm_area_alloc_pages(gfp_t gfp, int nid, 3403343ab817SUladzislau Rezki (Sony) unsigned int order, unsigned int nr_pages, struct page **pages) 340412b9f873SUladzislau Rezki { 340512b9f873SUladzislau Rezki unsigned int nr_allocated = 0; 3406e9c3cda4SMichal Hocko gfp_t alloc_gfp = gfp; 3407e9c3cda4SMichal Hocko bool nofail = false; 3408ffb29b1cSChen Wandun struct page *page; 3409ffb29b1cSChen Wandun int i; 341012b9f873SUladzislau Rezki 341112b9f873SUladzislau Rezki /* 341212b9f873SUladzislau Rezki * For order-0 pages we make use of the bulk allocator. If 341312b9f873SUladzislau Rezki * the page array ends up only partly populated (or not populated 341412b9f873SUladzislau Rezki * at all) due to failures, fall back to a single page allocator 341512b9f873SUladzislau Rezki * that is more permissive. 341612b9f873SUladzislau Rezki */ 3417c00b6b96SChen Wandun if (!order) { 3418e9c3cda4SMichal Hocko /* bulk allocator doesn't support nofail req. officially */ 34199376130cSMichal Hocko gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL; 34209376130cSMichal Hocko 3421343ab817SUladzislau Rezki (Sony) while (nr_allocated < nr_pages) { 3422343ab817SUladzislau Rezki (Sony) unsigned int nr, nr_pages_request; 3423343ab817SUladzislau Rezki (Sony) 3424343ab817SUladzislau Rezki (Sony) /* 3425343ab817SUladzislau Rezki (Sony) * The maximum allowed request is hard-coded to 100 3426343ab817SUladzislau Rezki (Sony) * pages per call, to prevent a long preemption-off 3427343ab817SUladzislau Rezki (Sony) * scenario in the bulk allocator, so the request 3428343ab817SUladzislau Rezki (Sony) * range is [1:100]. 3429343ab817SUladzislau Rezki (Sony) */ 3430343ab817SUladzislau Rezki (Sony) nr_pages_request = min(100U, nr_pages - nr_allocated); 3431343ab817SUladzislau Rezki (Sony) 3432c00b6b96SChen Wandun /* Memory allocation should honor mempolicy: we must not 3433c00b6b96SChen Wandun * blindly use the nearest node when nid == NUMA_NO_NODE, 3434c00b6b96SChen Wandun * otherwise memory may be allocated on only one node 343598af39d5SYixuan Cao * while the mempolicy wants to interleave across nodes.
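 *
 * Editor's note: when a bulk call returns fewer pages than requested
 * (say 37 of 100), the loop below breaks out and the remaining pages
 * are allocated one at a time by the fallback path further down.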
3436c00b6b96SChen Wandun */ 3437c00b6b96SChen Wandun if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE) 34389376130cSMichal Hocko nr = alloc_pages_bulk_array_mempolicy(bulk_gfp, 3439c00b6b96SChen Wandun nr_pages_request, 3440c00b6b96SChen Wandun pages + nr_allocated); 3441c00b6b96SChen Wandun 3442c00b6b96SChen Wandun else 34439376130cSMichal Hocko nr = alloc_pages_bulk_array_node(bulk_gfp, nid, 3444c00b6b96SChen Wandun nr_pages_request, 3445c00b6b96SChen Wandun pages + nr_allocated); 3446343ab817SUladzislau Rezki (Sony) 3447343ab817SUladzislau Rezki (Sony) nr_allocated += nr; 3448343ab817SUladzislau Rezki (Sony) cond_resched(); 3449343ab817SUladzislau Rezki (Sony) 3450343ab817SUladzislau Rezki (Sony) /* 3451343ab817SUladzislau Rezki (Sony) * If zero pages were obtained, or the request was only 3452343ab817SUladzislau Rezki (Sony) * partly satisfied, fall back to the single page allocator. 3453343ab817SUladzislau Rezki (Sony) */ 3454343ab817SUladzislau Rezki (Sony) if (nr != nr_pages_request) 3455343ab817SUladzislau Rezki (Sony) break; 3456343ab817SUladzislau Rezki (Sony) } 3457e9c3cda4SMichal Hocko } else if (gfp & __GFP_NOFAIL) { 3458e9c3cda4SMichal Hocko /* 3459e9c3cda4SMichal Hocko * Higher order nofail allocations are really expensive and 3460e9c3cda4SMichal Hocko * potentially dangerous (premature OOM, disruptive reclaim, 3461e9c3cda4SMichal Hocko * compaction etc). 3462e9c3cda4SMichal Hocko */ 3463e9c3cda4SMichal Hocko alloc_gfp &= ~__GFP_NOFAIL; 3464e9c3cda4SMichal Hocko nofail = true; 34653b8000aeSNicholas Piggin } 346612b9f873SUladzislau Rezki 346712b9f873SUladzislau Rezki /* High-order pages or fallback path if "bulk" fails. */ 3468ffb29b1cSChen Wandun while (nr_allocated < nr_pages) { 3469dd544141SVasily Averin if (fatal_signal_pending(current)) 3470dd544141SVasily Averin break; 3471dd544141SVasily Averin 3472ffb29b1cSChen Wandun if (nid == NUMA_NO_NODE) 3473e9c3cda4SMichal Hocko page = alloc_pages(alloc_gfp, order); 3474ffb29b1cSChen Wandun else 3475e9c3cda4SMichal Hocko page = alloc_pages_node(nid, alloc_gfp, order); 3476e9c3cda4SMichal Hocko if (unlikely(!page)) { 3477e9c3cda4SMichal Hocko if (!nofail) 347812b9f873SUladzislau Rezki break; 3479e9c3cda4SMichal Hocko 3480e9c3cda4SMichal Hocko /* fall back to the zero order allocations */ 3481e9c3cda4SMichal Hocko alloc_gfp |= __GFP_NOFAIL; 3482e9c3cda4SMichal Hocko order = 0; 3483e9c3cda4SMichal Hocko continue; 3484e9c3cda4SMichal Hocko } 3485e9c3cda4SMichal Hocko 34863b8000aeSNicholas Piggin /* 34873b8000aeSNicholas Piggin * Higher order allocations must be able to be treated as 34883b8000aeSNicholas Piggin * independent small pages by callers (as they can with 34893b8000aeSNicholas Piggin * small-page vmallocs). Some drivers do their own refcounting 34903b8000aeSNicholas Piggin * on vmalloc_to_page() pages, some use page->mapping, 34913b8000aeSNicholas Piggin * page->lru, etc. 34923b8000aeSNicholas Piggin */ 34933b8000aeSNicholas Piggin if (order) 34943b8000aeSNicholas Piggin split_page(page, order); 349512b9f873SUladzislau Rezki 349612b9f873SUladzislau Rezki /* 349712b9f873SUladzislau Rezki * Careful, we allocate and map page-order pages, but 349812b9f873SUladzislau Rezki * tracking is done per PAGE_SIZE page so as to keep the 349912b9f873SUladzislau Rezki * vm_struct APIs independent of the physical/mapped size.
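 *
 * Editor's example: with order = 2, the loop below stores page, page + 1,
 * page + 2 and page + 3 into pages[nr_allocated .. nr_allocated + 3], so
 * callers always see an array of order-0 pages no matter which order was
 * actually allocated.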
350012b9f873SUladzislau Rezki */ 350112b9f873SUladzislau Rezki for (i = 0; i < (1U << order); i++) 350212b9f873SUladzislau Rezki pages[nr_allocated + i] = page + i; 350312b9f873SUladzislau Rezki 350412b9f873SUladzislau Rezki cond_resched(); 350512b9f873SUladzislau Rezki nr_allocated += 1U << order; 350612b9f873SUladzislau Rezki } 350712b9f873SUladzislau Rezki 350812b9f873SUladzislau Rezki return nr_allocated; 350912b9f873SUladzislau Rezki } 351012b9f873SUladzislau Rezki 3511e31d9eb5SAdrian Bunk static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 3512121e6f32SNicholas Piggin pgprot_t prot, unsigned int page_shift, 3513121e6f32SNicholas Piggin int node) 35141da177e4SLinus Torvalds { 3515930f036bSDavid Rientjes const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 35169376130cSMichal Hocko bool nofail = gfp_mask & __GFP_NOFAIL; 3517121e6f32SNicholas Piggin unsigned long addr = (unsigned long)area->addr; 3518121e6f32SNicholas Piggin unsigned long size = get_vm_area_size(area); 351934fe6537SAndrew Morton unsigned long array_size; 3520121e6f32SNicholas Piggin unsigned int nr_small_pages = size >> PAGE_SHIFT; 3521121e6f32SNicholas Piggin unsigned int page_order; 3522451769ebSMichal Hocko unsigned int flags; 3523451769ebSMichal Hocko int ret; 35241da177e4SLinus Torvalds 3525121e6f32SNicholas Piggin array_size = (unsigned long)nr_small_pages * sizeof(struct page *); 352680b1d8fdSLorenzo Stoakes 3527f255935bSChristoph Hellwig if (!(gfp_mask & (GFP_DMA | GFP_DMA32))) 3528f255935bSChristoph Hellwig gfp_mask |= __GFP_HIGHMEM; 35291da177e4SLinus Torvalds 35301da177e4SLinus Torvalds /* Please note that the recursion is strictly bounded. */ 35318757d5faSJan Kiszka if (array_size > PAGE_SIZE) { 35325c1f4e69SUladzislau Rezki (Sony) area->pages = __vmalloc_node(array_size, 1, nested_gfp, node, 3533f255935bSChristoph Hellwig area->caller); 3534286e1ea3SAndrew Morton } else { 35355c1f4e69SUladzislau Rezki (Sony) area->pages = kmalloc_node(array_size, nested_gfp, node); 3536286e1ea3SAndrew Morton } 35377ea36242SAustin Kim 35385c1f4e69SUladzislau Rezki (Sony) if (!area->pages) { 3539c3d77172SUladzislau Rezki (Sony) warn_alloc(gfp_mask, NULL, 3540f4bdfeafSUladzislau Rezki (Sony) "vmalloc error: size %lu, failed to allocate page array size %lu", 3541d70bec8cSNicholas Piggin nr_small_pages * PAGE_SIZE, array_size); 3542cd61413bSUladzislau Rezki (Sony) free_vm_area(area); 35431da177e4SLinus Torvalds return NULL; 35441da177e4SLinus Torvalds } 35451da177e4SLinus Torvalds 3546121e6f32SNicholas Piggin set_vm_area_page_order(area, page_shift - PAGE_SHIFT); 3547121e6f32SNicholas Piggin page_order = vm_area_page_order(area); 3548121e6f32SNicholas Piggin 3549c3d77172SUladzislau Rezki (Sony) area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN, 3550c3d77172SUladzislau Rezki (Sony) node, page_order, nr_small_pages, area->pages); 35515c1f4e69SUladzislau Rezki (Sony) 355297105f0aSRoman Gushchin atomic_long_add(area->nr_pages, &nr_vmalloc_pages); 35534e5aa1f4SShakeel Butt if (gfp_mask & __GFP_ACCOUNT) { 35543b8000aeSNicholas Piggin int i; 35554e5aa1f4SShakeel Butt 35563b8000aeSNicholas Piggin for (i = 0; i < area->nr_pages; i++) 35573b8000aeSNicholas Piggin mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1); 35584e5aa1f4SShakeel Butt } 35595c1f4e69SUladzislau Rezki (Sony) 35605c1f4e69SUladzislau Rezki (Sony) /* 35615c1f4e69SUladzislau Rezki (Sony) * If not enough pages were obtained to satisfy the 3562f41f036bSChristoph Hellwig * allocation request, free whatever was obtained via vfree().
35635c1f4e69SUladzislau Rezki (Sony) */ 35645c1f4e69SUladzislau Rezki (Sony) if (area->nr_pages != nr_small_pages) { 356595a301eeSLorenzo Stoakes /* 356695a301eeSLorenzo Stoakes * vm_area_alloc_pages() can fail due to insufficient memory, but 356795a301eeSLorenzo Stoakes * also: 356895a301eeSLorenzo Stoakes * 356995a301eeSLorenzo Stoakes * - a pending fatal signal 357095a301eeSLorenzo Stoakes * - insufficient huge page-order pages 357195a301eeSLorenzo Stoakes * 357295a301eeSLorenzo Stoakes * Since we always retry allocations at order-0 in the huge page 357395a301eeSLorenzo Stoakes * case, a warning for either is spurious. 357495a301eeSLorenzo Stoakes */ 357595a301eeSLorenzo Stoakes if (!fatal_signal_pending(current) && page_order == 0) 3576c3d77172SUladzislau Rezki (Sony) warn_alloc(gfp_mask, NULL, 357795a301eeSLorenzo Stoakes "vmalloc error: size %lu, failed to allocate pages", 357895a301eeSLorenzo Stoakes area->nr_pages * PAGE_SIZE); 35791da177e4SLinus Torvalds goto fail; 35801da177e4SLinus Torvalds } 3581121e6f32SNicholas Piggin 3582451769ebSMichal Hocko /* 3583451769ebSMichal Hocko * page table allocations ignore the external gfp mask; enforce it 3584451769ebSMichal Hocko * by using the scope API 3585451769ebSMichal Hocko */ 3586451769ebSMichal Hocko if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) 3587451769ebSMichal Hocko flags = memalloc_nofs_save(); 3588451769ebSMichal Hocko else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) 3589451769ebSMichal Hocko flags = memalloc_noio_save(); 3590451769ebSMichal Hocko 35919376130cSMichal Hocko do { 3592451769ebSMichal Hocko ret = vmap_pages_range(addr, addr + size, prot, area->pages, 3593451769ebSMichal Hocko page_shift); 35949376130cSMichal Hocko if (nofail && (ret < 0)) 35959376130cSMichal Hocko schedule_timeout_uninterruptible(1); 35969376130cSMichal Hocko } while (nofail && (ret < 0)); 3597451769ebSMichal Hocko 3598451769ebSMichal Hocko if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) 3599451769ebSMichal Hocko memalloc_nofs_restore(flags); 3600451769ebSMichal Hocko else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) 3601451769ebSMichal Hocko memalloc_noio_restore(flags); 3602451769ebSMichal Hocko 3603451769ebSMichal Hocko if (ret < 0) { 3604c3d77172SUladzislau Rezki (Sony) warn_alloc(gfp_mask, NULL, 3605f4bdfeafSUladzislau Rezki (Sony) "vmalloc error: size %lu, failed to map pages", 3606d70bec8cSNicholas Piggin area->nr_pages * PAGE_SIZE); 36071da177e4SLinus Torvalds goto fail; 3608d70bec8cSNicholas Piggin } 3609ed1f324cSChristoph Hellwig 36101da177e4SLinus Torvalds return area->addr; 36111da177e4SLinus Torvalds 36121da177e4SLinus Torvalds fail: 3613f41f036bSChristoph Hellwig vfree(area->addr); 36141da177e4SLinus Torvalds return NULL; 36151da177e4SLinus Torvalds } 36161da177e4SLinus Torvalds 3617d0a21265SDavid Rientjes /** 3618d0a21265SDavid Rientjes * __vmalloc_node_range - allocate virtually contiguous memory 3619d0a21265SDavid Rientjes * @size: allocation size 3620d0a21265SDavid Rientjes * @align: desired alignment 3621d0a21265SDavid Rientjes * @start: vm area range start 3622d0a21265SDavid Rientjes * @end: vm area range end 3623d0a21265SDavid Rientjes * @gfp_mask: flags for the page level allocator 3624d0a21265SDavid Rientjes * @prot: protection mask for the allocated pages 3625cb9e3c29SAndrey Ryabinin * @vm_flags: additional vm area flags (e.g.
%VM_NO_GUARD) 362600ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 3627d0a21265SDavid Rientjes * @caller: caller's return address 3628d0a21265SDavid Rientjes * 3629d0a21265SDavid Rientjes * Allocate enough pages to cover @size from the page level 3630b7d90e7aSMichal Hocko * allocator with @gfp_mask flags. Please note that the full set of gfp 363130d3f011SMichal Hocko * flags is not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all 363230d3f011SMichal Hocko * supported. 363330d3f011SMichal Hocko * Zone modifiers are not supported. From the reclaim modifiers 363430d3f011SMichal Hocko * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported) 363530d3f011SMichal Hocko * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and 363630d3f011SMichal Hocko * __GFP_RETRY_MAYFAIL are not supported). 363730d3f011SMichal Hocko * 363830d3f011SMichal Hocko * __GFP_NOWARN can be used to suppress failure messages. 3639b7d90e7aSMichal Hocko * 3640b7d90e7aSMichal Hocko * Map them into contiguous kernel virtual space, using a page table 3641b7d90e7aSMichal Hocko * protection of @prot. 3642a862f68aSMike Rapoport * 3643a862f68aSMike Rapoport * Return: the address of the area or %NULL on failure 3644d0a21265SDavid Rientjes */ 3645d0a21265SDavid Rientjes void *__vmalloc_node_range(unsigned long size, unsigned long align, 3646d0a21265SDavid Rientjes unsigned long start, unsigned long end, gfp_t gfp_mask, 3647cb9e3c29SAndrey Ryabinin pgprot_t prot, unsigned long vm_flags, int node, 3648cb9e3c29SAndrey Ryabinin const void *caller) 3649930fc45aSChristoph Lameter { 3650d0a21265SDavid Rientjes struct vm_struct *area; 365119f1c3acSAndrey Konovalov void *ret; 3652f6e39794SAndrey Konovalov kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE; 3653d0a21265SDavid Rientjes unsigned long real_size = size; 3654121e6f32SNicholas Piggin unsigned long real_align = align; 3655121e6f32SNicholas Piggin unsigned int shift = PAGE_SHIFT; 3656d0a21265SDavid Rientjes 3657d70bec8cSNicholas Piggin if (WARN_ON_ONCE(!size)) 3658d70bec8cSNicholas Piggin return NULL; 3659d70bec8cSNicholas Piggin 3660d70bec8cSNicholas Piggin if ((size >> PAGE_SHIFT) > totalram_pages()) { 3661d70bec8cSNicholas Piggin warn_alloc(gfp_mask, NULL, 3662f4bdfeafSUladzislau Rezki (Sony) "vmalloc error: size %lu, exceeds total pages", 3663f4bdfeafSUladzislau Rezki (Sony) real_size); 3664d70bec8cSNicholas Piggin return NULL; 3665121e6f32SNicholas Piggin } 3666d0a21265SDavid Rientjes 3667559089e0SSong Liu if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) { 3668121e6f32SNicholas Piggin unsigned long size_per_node; 3669121e6f32SNicholas Piggin 3670121e6f32SNicholas Piggin /* 3671121e6f32SNicholas Piggin * Try huge pages. Only try for PAGE_KERNEL allocations, 3672121e6f32SNicholas Piggin * others like modules don't yet expect huge pages in 3673121e6f32SNicholas Piggin * their allocations due to apply_to_page_range not 3674121e6f32SNicholas Piggin * supporting them.
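 *
 * Editor's worked example: a 4 MB PAGE_KERNEL request with
 * node == NUMA_NO_NODE on a two-node machine gives size_per_node = 2 MB;
 * where PMD_SIZE is 2 MB (e.g. x86-64) and the arch supports PMD
 * mappings, shift becomes PMD_SHIFT and both the alignment and the size
 * are rounded up to 2 MB below.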
3675121e6f32SNicholas Piggin */ 3676121e6f32SNicholas Piggin 3677121e6f32SNicholas Piggin size_per_node = size; 3678121e6f32SNicholas Piggin if (node == NUMA_NO_NODE) 3679121e6f32SNicholas Piggin size_per_node /= num_online_nodes(); 36803382bbeeSChristophe Leroy if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE) 3681121e6f32SNicholas Piggin shift = PMD_SHIFT; 36823382bbeeSChristophe Leroy else 36833382bbeeSChristophe Leroy shift = arch_vmap_pte_supported_shift(size_per_node); 36843382bbeeSChristophe Leroy 3685121e6f32SNicholas Piggin align = max(real_align, 1UL << shift); 3686121e6f32SNicholas Piggin size = ALIGN(real_size, 1UL << shift); 3687121e6f32SNicholas Piggin } 3688121e6f32SNicholas Piggin 3689121e6f32SNicholas Piggin again: 36907ca3027bSDaniel Axtens area = __get_vm_area_node(real_size, align, shift, VM_ALLOC | 36917ca3027bSDaniel Axtens VM_UNINITIALIZED | vm_flags, start, end, node, 36927ca3027bSDaniel Axtens gfp_mask, caller); 3693d70bec8cSNicholas Piggin if (!area) { 36949376130cSMichal Hocko bool nofail = gfp_mask & __GFP_NOFAIL; 3695d70bec8cSNicholas Piggin warn_alloc(gfp_mask, NULL, 36969376130cSMichal Hocko "vmalloc error: size %lu, vm_struct allocation failed%s", 36979376130cSMichal Hocko real_size, (nofail) ? ". Retrying." : ""); 36989376130cSMichal Hocko if (nofail) { 36999376130cSMichal Hocko schedule_timeout_uninterruptible(1); 37009376130cSMichal Hocko goto again; 37019376130cSMichal Hocko } 3702de7d2b56SJoe Perches goto fail; 3703d70bec8cSNicholas Piggin } 3704d0a21265SDavid Rientjes 3705f6e39794SAndrey Konovalov /* 3706f6e39794SAndrey Konovalov * Prepare arguments for __vmalloc_area_node() and 3707f6e39794SAndrey Konovalov * kasan_unpoison_vmalloc(). 3708f6e39794SAndrey Konovalov */ 3709f6e39794SAndrey Konovalov if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) { 3710f6e39794SAndrey Konovalov if (kasan_hw_tags_enabled()) { 371101d92c7fSAndrey Konovalov /* 371201d92c7fSAndrey Konovalov * Modify protection bits to allow tagging. 3713f6e39794SAndrey Konovalov * This must be done before mapping. 371401d92c7fSAndrey Konovalov */ 371501d92c7fSAndrey Konovalov prot = arch_vmap_pgprot_tagged(prot); 371601d92c7fSAndrey Konovalov 371723689e91SAndrey Konovalov /* 3718f6e39794SAndrey Konovalov * Skip page_alloc poisoning and zeroing for physical 3719f6e39794SAndrey Konovalov * pages backing VM_ALLOC mapping. Memory is instead 3720f6e39794SAndrey Konovalov * poisoned and zeroed by kasan_unpoison_vmalloc(). 372123689e91SAndrey Konovalov */ 37220a54864fSPeter Collingbourne gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO; 372323689e91SAndrey Konovalov } 372423689e91SAndrey Konovalov 3725f6e39794SAndrey Konovalov /* Take note that the mapping is PAGE_KERNEL. */ 3726f6e39794SAndrey Konovalov kasan_flags |= KASAN_VMALLOC_PROT_NORMAL; 3727f6e39794SAndrey Konovalov } 3728f6e39794SAndrey Konovalov 372901d92c7fSAndrey Konovalov /* Allocate physical pages and map them into vmalloc space. */ 373019f1c3acSAndrey Konovalov ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node); 373119f1c3acSAndrey Konovalov if (!ret) 3732121e6f32SNicholas Piggin goto fail; 373389219d37SCatalin Marinas 373423689e91SAndrey Konovalov /* 373523689e91SAndrey Konovalov * Mark the pages as accessible, now that they are mapped. 
37366c2f761dSAndrey Konovalov * The condition for setting KASAN_VMALLOC_INIT should complement the 37376c2f761dSAndrey Konovalov * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check 37386c2f761dSAndrey Konovalov * to make sure that memory is initialized under the same conditions. 3739f6e39794SAndrey Konovalov * Tag-based KASAN modes only assign tags to normal non-executable 3740f6e39794SAndrey Konovalov * allocations, see __kasan_unpoison_vmalloc(). 374123689e91SAndrey Konovalov */ 3742f6e39794SAndrey Konovalov kasan_flags |= KASAN_VMALLOC_VM_ALLOC; 37436c2f761dSAndrey Konovalov if (!want_init_on_free() && want_init_on_alloc(gfp_mask) && 37446c2f761dSAndrey Konovalov (gfp_mask & __GFP_SKIP_ZERO)) 374523689e91SAndrey Konovalov kasan_flags |= KASAN_VMALLOC_INIT; 3746f6e39794SAndrey Konovalov /* KASAN_VMALLOC_PROT_NORMAL already set if required. */ 374723689e91SAndrey Konovalov area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags); 374819f1c3acSAndrey Konovalov 374989219d37SCatalin Marinas /* 375020fc02b4SZhang Yanfei * In this function, newly allocated vm_struct has VM_UNINITIALIZED 375120fc02b4SZhang Yanfei * flag. It means that vm_struct is not fully initialized. 37524341fa45SJoonsoo Kim * Now, it is fully initialized, so remove this flag here. 3753f5252e00SMitsuo Hayasaka */ 375420fc02b4SZhang Yanfei clear_vm_uninitialized_flag(area); 3755f5252e00SMitsuo Hayasaka 37567ca3027bSDaniel Axtens size = PAGE_ALIGN(size); 375760115fa5SKefeng Wang if (!(vm_flags & VM_DEFER_KMEMLEAK)) 375894f4a161SCatalin Marinas kmemleak_vmalloc(area, size, gfp_mask); 375989219d37SCatalin Marinas 376019f1c3acSAndrey Konovalov return area->addr; 3761de7d2b56SJoe Perches 3762de7d2b56SJoe Perches fail: 3763121e6f32SNicholas Piggin if (shift > PAGE_SHIFT) { 3764121e6f32SNicholas Piggin shift = PAGE_SHIFT; 3765121e6f32SNicholas Piggin align = real_align; 3766121e6f32SNicholas Piggin size = real_size; 3767121e6f32SNicholas Piggin goto again; 3768121e6f32SNicholas Piggin } 3769121e6f32SNicholas Piggin 3770de7d2b56SJoe Perches return NULL; 3771930fc45aSChristoph Lameter } 3772930fc45aSChristoph Lameter 37731da177e4SLinus Torvalds /** 3774930fc45aSChristoph Lameter * __vmalloc_node - allocate virtually contiguous memory 37751da177e4SLinus Torvalds * @size: allocation size 37762dca6999SDavid Miller * @align: desired alignment 37771da177e4SLinus Torvalds * @gfp_mask: flags for the page level allocator 377800ef2d2fSDavid Rientjes * @node: node to use for allocation or NUMA_NO_NODE 3779c85d194bSRandy Dunlap * @caller: caller's return address 37801da177e4SLinus Torvalds * 3781f38fcb9cSChristoph Hellwig * Allocate enough pages to cover @size from the page level allocator with 3782f38fcb9cSChristoph Hellwig * @gfp_mask flags. Map them into contiguous kernel virtual space. 3783a7c3e901SMichal Hocko * 3784dcda9b04SMichal Hocko * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL 3785a7c3e901SMichal Hocko * and __GFP_NOFAIL are not supported 3786a7c3e901SMichal Hocko * 3787a7c3e901SMichal Hocko * Any use of gfp flags outside of GFP_KERNEL should be consulted 3788a7c3e901SMichal Hocko * with mm people. 
3789a862f68aSMike Rapoport * 3790a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 37911da177e4SLinus Torvalds */ 37922b905948SChristoph Hellwig void *__vmalloc_node(unsigned long size, unsigned long align, 3793f38fcb9cSChristoph Hellwig gfp_t gfp_mask, int node, const void *caller) 37941da177e4SLinus Torvalds { 3795d0a21265SDavid Rientjes return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 3796f38fcb9cSChristoph Hellwig gfp_mask, PAGE_KERNEL, 0, node, caller); 37971da177e4SLinus Torvalds } 3798c3f896dcSChristoph Hellwig /* 3799c3f896dcSChristoph Hellwig * This is only for performance analysis of vmalloc and stress purpose. 3800c3f896dcSChristoph Hellwig * It is required by vmalloc test module, therefore do not use it other 3801c3f896dcSChristoph Hellwig * than that. 3802c3f896dcSChristoph Hellwig */ 3803c3f896dcSChristoph Hellwig #ifdef CONFIG_TEST_VMALLOC_MODULE 3804c3f896dcSChristoph Hellwig EXPORT_SYMBOL_GPL(__vmalloc_node); 3805c3f896dcSChristoph Hellwig #endif 38061da177e4SLinus Torvalds 380788dca4caSChristoph Hellwig void *__vmalloc(unsigned long size, gfp_t gfp_mask) 3808930fc45aSChristoph Lameter { 3809f38fcb9cSChristoph Hellwig return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE, 381023016969SChristoph Lameter __builtin_return_address(0)); 3811930fc45aSChristoph Lameter } 38121da177e4SLinus Torvalds EXPORT_SYMBOL(__vmalloc); 38131da177e4SLinus Torvalds 38141da177e4SLinus Torvalds /** 38151da177e4SLinus Torvalds * vmalloc - allocate virtually contiguous memory 38161da177e4SLinus Torvalds * @size: allocation size 381792eac168SMike Rapoport * 38181da177e4SLinus Torvalds * Allocate enough pages to cover @size from the page level 38191da177e4SLinus Torvalds * allocator and map them into contiguous kernel virtual space. 38201da177e4SLinus Torvalds * 3821c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 38221da177e4SLinus Torvalds * use __vmalloc() instead. 3823a862f68aSMike Rapoport * 3824a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 38251da177e4SLinus Torvalds */ 38261da177e4SLinus Torvalds void *vmalloc(unsigned long size) 38271da177e4SLinus Torvalds { 38284d39d728SChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE, 38294d39d728SChristoph Hellwig __builtin_return_address(0)); 38301da177e4SLinus Torvalds } 38311da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc); 38321da177e4SLinus Torvalds 3833930fc45aSChristoph Lameter /** 3834559089e0SSong Liu * vmalloc_huge - allocate virtually contiguous memory, allow huge pages 383515a64f5aSClaudio Imbrenda * @size: allocation size 3836559089e0SSong Liu * @gfp_mask: flags for the page level allocator 383715a64f5aSClaudio Imbrenda * 3838559089e0SSong Liu * Allocate enough pages to cover @size from the page level 383915a64f5aSClaudio Imbrenda * allocator and map them into contiguous kernel virtual space. 
3840559089e0SSong Liu * If @size is greater than or equal to PMD_SIZE, allow using 3841559089e0SSong Liu * huge pages for the memory 384215a64f5aSClaudio Imbrenda * 384315a64f5aSClaudio Imbrenda * Return: pointer to the allocated memory or %NULL on error 384415a64f5aSClaudio Imbrenda */ 3845559089e0SSong Liu void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) 384615a64f5aSClaudio Imbrenda { 384715a64f5aSClaudio Imbrenda return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, 3848559089e0SSong Liu gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, 384915a64f5aSClaudio Imbrenda NUMA_NO_NODE, __builtin_return_address(0)); 385015a64f5aSClaudio Imbrenda } 3851559089e0SSong Liu EXPORT_SYMBOL_GPL(vmalloc_huge); 385215a64f5aSClaudio Imbrenda 385315a64f5aSClaudio Imbrenda /** 3854e1ca7788SDave Young * vzalloc - allocate virtually contiguous memory with zero fill 3855e1ca7788SDave Young * @size: allocation size 385692eac168SMike Rapoport * 3857e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 3858e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 3859e1ca7788SDave Young * The memory allocated is set to zero. 3860e1ca7788SDave Young * 3861e1ca7788SDave Young * For tight control over page level allocator and protection flags 3862e1ca7788SDave Young * use __vmalloc() instead. 3863a862f68aSMike Rapoport * 3864a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 3865e1ca7788SDave Young */ 3866e1ca7788SDave Young void *vzalloc(unsigned long size) 3867e1ca7788SDave Young { 38684d39d728SChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 38694d39d728SChristoph Hellwig __builtin_return_address(0)); 3870e1ca7788SDave Young } 3871e1ca7788SDave Young EXPORT_SYMBOL(vzalloc); 3872e1ca7788SDave Young 3873e1ca7788SDave Young /** 3874ead04089SRolf Eike Beer * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 387583342314SNick Piggin * @size: allocation size 3876ead04089SRolf Eike Beer * 3877ead04089SRolf Eike Beer * The resulting memory area is zeroed so it can be mapped to userspace 3878ead04089SRolf Eike Beer * without leaking data. 3879a862f68aSMike Rapoport * 3880a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 388183342314SNick Piggin */ 388283342314SNick Piggin void *vmalloc_user(unsigned long size) 388383342314SNick Piggin { 3884bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 3885bc84c535SRoman Penyaev GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, 3886bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE, 388700ef2d2fSDavid Rientjes __builtin_return_address(0)); 388883342314SNick Piggin } 388983342314SNick Piggin EXPORT_SYMBOL(vmalloc_user); 389083342314SNick Piggin 389183342314SNick Piggin /** 3892930fc45aSChristoph Lameter * vmalloc_node - allocate memory on a specific node 3893930fc45aSChristoph Lameter * @size: allocation size 3894d44e0780SRandy Dunlap * @node: numa node 3895930fc45aSChristoph Lameter * 3896930fc45aSChristoph Lameter * Allocate enough pages to cover @size from the page level 3897930fc45aSChristoph Lameter * allocator and map them into contiguous kernel virtual space. 3898930fc45aSChristoph Lameter * 3899c1c8897fSMichael Opdenacker * For tight control over page level allocator and protection flags 3900930fc45aSChristoph Lameter * use __vmalloc() instead. 
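 *
 * Editor's sketch (hypothetical caller): keeping a working buffer on the
 * node that will use it most:
 *
 *	buf = vmalloc_node(buf_size, numa_node_id());
 *
 * (buf_size is illustrative; numa_node_id() picks the calling CPU's node.)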
3901a862f68aSMike Rapoport * 3902a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 3903930fc45aSChristoph Lameter */ 3904930fc45aSChristoph Lameter void *vmalloc_node(unsigned long size, int node) 3905930fc45aSChristoph Lameter { 3906f38fcb9cSChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL, node, 3907f38fcb9cSChristoph Hellwig __builtin_return_address(0)); 3908930fc45aSChristoph Lameter } 3909930fc45aSChristoph Lameter EXPORT_SYMBOL(vmalloc_node); 3910930fc45aSChristoph Lameter 3911e1ca7788SDave Young /** 3912e1ca7788SDave Young * vzalloc_node - allocate memory on a specific node with zero fill 3913e1ca7788SDave Young * @size: allocation size 3914e1ca7788SDave Young * @node: numa node 3915e1ca7788SDave Young * 3916e1ca7788SDave Young * Allocate enough pages to cover @size from the page level 3917e1ca7788SDave Young * allocator and map them into contiguous kernel virtual space. 3918e1ca7788SDave Young * The memory allocated is set to zero. 3919e1ca7788SDave Young * 3920a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 3921e1ca7788SDave Young */ 3922e1ca7788SDave Young void *vzalloc_node(unsigned long size, int node) 3923e1ca7788SDave Young { 39244d39d728SChristoph Hellwig return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node, 39254d39d728SChristoph Hellwig __builtin_return_address(0)); 3926e1ca7788SDave Young } 3927e1ca7788SDave Young EXPORT_SYMBOL(vzalloc_node); 3928e1ca7788SDave Young 39290d08e0d3SAndi Kleen #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 3930698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 39310d08e0d3SAndi Kleen #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 3932698d0831SMichal Hocko #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) 39330d08e0d3SAndi Kleen #else 3934698d0831SMichal Hocko /* 3935698d0831SMichal Hocko * 64b systems should always have either DMA or DMA32 zones. For others 3936698d0831SMichal Hocko * GFP_DMA32 should do the right thing and use the normal zone. 3937698d0831SMichal Hocko */ 393868d68ff6SZhiyuan Dai #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 39390d08e0d3SAndi Kleen #endif 39400d08e0d3SAndi Kleen 39411da177e4SLinus Torvalds /** 39421da177e4SLinus Torvalds * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 39431da177e4SLinus Torvalds * @size: allocation size 39441da177e4SLinus Torvalds * 39451da177e4SLinus Torvalds * Allocate enough 32bit PA addressable pages to cover @size from the 39461da177e4SLinus Torvalds * page level allocator and map them into contiguous kernel virtual space. 3947a862f68aSMike Rapoport * 3948a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 39491da177e4SLinus Torvalds */ 39501da177e4SLinus Torvalds void *vmalloc_32(unsigned long size) 39511da177e4SLinus Torvalds { 3952f38fcb9cSChristoph Hellwig return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE, 3953f38fcb9cSChristoph Hellwig __builtin_return_address(0)); 39541da177e4SLinus Torvalds } 39551da177e4SLinus Torvalds EXPORT_SYMBOL(vmalloc_32); 39561da177e4SLinus Torvalds 395783342314SNick Piggin /** 3958ead04089SRolf Eike Beer * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 395983342314SNick Piggin * @size: allocation size 3960ead04089SRolf Eike Beer * 3961ead04089SRolf Eike Beer * The resulting memory area is 32bit addressable and zeroed so it can be 3962ead04089SRolf Eike Beer * mapped to userspace without leaking data. 
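 *
 * Editor's sketch: a typical pairing with remap_vmalloc_range() in a
 * driver's mmap handler (hypothetical; error handling omitted):
 *
 *	buf = vmalloc_32_user(vma->vm_end - vma->vm_start);
 *	...
 *	return remap_vmalloc_range(vma, buf, 0);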
3963a862f68aSMike Rapoport * 3964a862f68aSMike Rapoport * Return: pointer to the allocated memory or %NULL on error 396583342314SNick Piggin */ 396683342314SNick Piggin void *vmalloc_32_user(unsigned long size) 396783342314SNick Piggin { 3968bc84c535SRoman Penyaev return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 3969bc84c535SRoman Penyaev GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 3970bc84c535SRoman Penyaev VM_USERMAP, NUMA_NO_NODE, 39715a82ac71SRoman Penyaev __builtin_return_address(0)); 397283342314SNick Piggin } 397383342314SNick Piggin EXPORT_SYMBOL(vmalloc_32_user); 397483342314SNick Piggin 3975d0107eb0SKAMEZAWA Hiroyuki /* 39764c91c07cSLorenzo Stoakes * Atomically zero bytes in the iterator. 39774c91c07cSLorenzo Stoakes * 39784c91c07cSLorenzo Stoakes * Returns the number of zeroed bytes. 3979d0107eb0SKAMEZAWA Hiroyuki */ 39804c91c07cSLorenzo Stoakes static size_t zero_iter(struct iov_iter *iter, size_t count) 3981d0107eb0SKAMEZAWA Hiroyuki { 39824c91c07cSLorenzo Stoakes size_t remains = count; 3983d0107eb0SKAMEZAWA Hiroyuki 39844c91c07cSLorenzo Stoakes while (remains > 0) { 39854c91c07cSLorenzo Stoakes size_t num, copied; 39864c91c07cSLorenzo Stoakes 39870e4bc271SLu Hongfei num = min_t(size_t, remains, PAGE_SIZE); 39884c91c07cSLorenzo Stoakes copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter); 39894c91c07cSLorenzo Stoakes remains -= copied; 39904c91c07cSLorenzo Stoakes 39914c91c07cSLorenzo Stoakes if (copied < num) 39924c91c07cSLorenzo Stoakes break; 39934c91c07cSLorenzo Stoakes } 39944c91c07cSLorenzo Stoakes 39954c91c07cSLorenzo Stoakes return count - remains; 39964c91c07cSLorenzo Stoakes } 39974c91c07cSLorenzo Stoakes 39984c91c07cSLorenzo Stoakes /* 39994c91c07cSLorenzo Stoakes * Small helper routine to copy contents from @addr to the iterator. 40004c91c07cSLorenzo Stoakes * If a page is not present, fill with zeroes. 40014c91c07cSLorenzo Stoakes * 40024c91c07cSLorenzo Stoakes * Returns the number of copied bytes. 40034c91c07cSLorenzo Stoakes */ 40044c91c07cSLorenzo Stoakes static size_t aligned_vread_iter(struct iov_iter *iter, 40054c91c07cSLorenzo Stoakes const char *addr, size_t count) 40064c91c07cSLorenzo Stoakes { 40074c91c07cSLorenzo Stoakes size_t remains = count; 40084c91c07cSLorenzo Stoakes struct page *page; 40094c91c07cSLorenzo Stoakes 40104c91c07cSLorenzo Stoakes while (remains > 0) { 4011d0107eb0SKAMEZAWA Hiroyuki unsigned long offset, length; 40124c91c07cSLorenzo Stoakes size_t copied = 0; 4013d0107eb0SKAMEZAWA Hiroyuki 4014891c49abSAlexander Kuleshov offset = offset_in_page(addr); 4015d0107eb0SKAMEZAWA Hiroyuki length = PAGE_SIZE - offset; 40164c91c07cSLorenzo Stoakes if (length > remains) 40174c91c07cSLorenzo Stoakes length = remains; 40184c91c07cSLorenzo Stoakes page = vmalloc_to_page(addr); 4019d0107eb0SKAMEZAWA Hiroyuki /* 40204c91c07cSLorenzo Stoakes * To do safe access to this _mapped_ area, we would need a lock. But 40214c91c07cSLorenzo Stoakes * adding a lock here would add overhead of vmalloc()/vfree() 40224c91c07cSLorenzo Stoakes * calls to this rarely used _debug_ interface. 40234c91c07cSLorenzo Stoakes * Instead of that, we use a local mapping via 40244c91c07cSLorenzo Stoakes * copy_page_to_iter_nofault() and accept a small overhead in 40254c91c07cSLorenzo Stoakes * this access function.
4026d0107eb0SKAMEZAWA Hiroyuki */ 40274c91c07cSLorenzo Stoakes if (page) 40284c91c07cSLorenzo Stoakes copied = copy_page_to_iter_nofault(page, offset, 40294c91c07cSLorenzo Stoakes length, iter); 40304c91c07cSLorenzo Stoakes else 40314c91c07cSLorenzo Stoakes copied = zero_iter(iter, length); 4032d0107eb0SKAMEZAWA Hiroyuki 40334c91c07cSLorenzo Stoakes addr += copied; 40344c91c07cSLorenzo Stoakes remains -= copied; 40354c91c07cSLorenzo Stoakes 40364c91c07cSLorenzo Stoakes if (copied != length) 40374c91c07cSLorenzo Stoakes break; 4038d0107eb0SKAMEZAWA Hiroyuki } 4039d0107eb0SKAMEZAWA Hiroyuki 40404c91c07cSLorenzo Stoakes return count - remains; 40414c91c07cSLorenzo Stoakes } 40424c91c07cSLorenzo Stoakes 40434c91c07cSLorenzo Stoakes /* 40444c91c07cSLorenzo Stoakes * Read from a vm_map_ram region of memory. 40454c91c07cSLorenzo Stoakes * 40464c91c07cSLorenzo Stoakes * Returns the number of copied bytes. 40474c91c07cSLorenzo Stoakes */ 40484c91c07cSLorenzo Stoakes static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr, 40494c91c07cSLorenzo Stoakes size_t count, unsigned long flags) 405006c89946SBaoquan He { 405106c89946SBaoquan He char *start; 405206c89946SBaoquan He struct vmap_block *vb; 4053062eacf5SUladzislau Rezki (Sony) struct xarray *xa; 405406c89946SBaoquan He unsigned long offset; 40554c91c07cSLorenzo Stoakes unsigned int rs, re; 40564c91c07cSLorenzo Stoakes size_t remains, n; 405706c89946SBaoquan He 405806c89946SBaoquan He /* 405906c89946SBaoquan He * If the area was created by the vm_map_ram() interface directly, and 406006c89946SBaoquan He * was not further subdivided (with management delegated to vmap_block), 406106c89946SBaoquan He * handle it here. 406206c89946SBaoquan He */ 40634c91c07cSLorenzo Stoakes if (!(flags & VMAP_BLOCK)) 40644c91c07cSLorenzo Stoakes return aligned_vread_iter(iter, addr, count); 40654c91c07cSLorenzo Stoakes 40664c91c07cSLorenzo Stoakes remains = count; 406706c89946SBaoquan He 406806c89946SBaoquan He /* 406906c89946SBaoquan He * The area is split into regions and tracked with a vmap_block; read out 407006c89946SBaoquan He * each region and zero-fill the holes between regions.
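 *
 * Editor's illustration: if used_map has bit ranges [2..3] and [6..6]
 * set, a read covering the whole block zero-fills up to page 2, copies
 * pages 2-3, zero-fills pages 4-5, copies page 6, and then zero-fills
 * whatever remains of the requested range.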
407106c89946SBaoquan He */ 4072fa1c77c1SUladzislau Rezki (Sony) xa = addr_to_vb_xa((unsigned long) addr); 4073062eacf5SUladzislau Rezki (Sony) vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr)); 407406c89946SBaoquan He if (!vb) 40754c91c07cSLorenzo Stoakes goto finished_zero; 407606c89946SBaoquan He 407706c89946SBaoquan He spin_lock(&vb->lock); 407806c89946SBaoquan He if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) { 407906c89946SBaoquan He spin_unlock(&vb->lock); 40804c91c07cSLorenzo Stoakes goto finished_zero; 40814c91c07cSLorenzo Stoakes } 40824c91c07cSLorenzo Stoakes 40834c91c07cSLorenzo Stoakes for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) { 40844c91c07cSLorenzo Stoakes size_t copied; 40854c91c07cSLorenzo Stoakes 40864c91c07cSLorenzo Stoakes if (remains == 0) 40874c91c07cSLorenzo Stoakes goto finished; 40884c91c07cSLorenzo Stoakes 40894c91c07cSLorenzo Stoakes start = vmap_block_vaddr(vb->va->va_start, rs); 40904c91c07cSLorenzo Stoakes 40914c91c07cSLorenzo Stoakes if (addr < start) { 40924c91c07cSLorenzo Stoakes size_t to_zero = min_t(size_t, start - addr, remains); 40934c91c07cSLorenzo Stoakes size_t zeroed = zero_iter(iter, to_zero); 40944c91c07cSLorenzo Stoakes 40954c91c07cSLorenzo Stoakes addr += zeroed; 40964c91c07cSLorenzo Stoakes remains -= zeroed; 40974c91c07cSLorenzo Stoakes 40984c91c07cSLorenzo Stoakes if (remains == 0 || zeroed != to_zero) 409906c89946SBaoquan He goto finished; 410006c89946SBaoquan He } 41014c91c07cSLorenzo Stoakes 410206c89946SBaoquan He /* it could start reading from the middle of a used region */ 410306c89946SBaoquan He offset = offset_in_page(addr); 410406c89946SBaoquan He n = ((re - rs + 1) << PAGE_SHIFT) - offset; 41054c91c07cSLorenzo Stoakes if (n > remains) 41064c91c07cSLorenzo Stoakes n = remains; 410706c89946SBaoquan He 41084c91c07cSLorenzo Stoakes copied = aligned_vread_iter(iter, start + offset, n); 41094c91c07cSLorenzo Stoakes 41104c91c07cSLorenzo Stoakes addr += copied; 41114c91c07cSLorenzo Stoakes remains -= copied; 41124c91c07cSLorenzo Stoakes 41134c91c07cSLorenzo Stoakes if (copied != n) 41144c91c07cSLorenzo Stoakes goto finished; 411506c89946SBaoquan He } 41164c91c07cSLorenzo Stoakes 411706c89946SBaoquan He spin_unlock(&vb->lock); 411806c89946SBaoquan He 41194c91c07cSLorenzo Stoakes finished_zero: 412006c89946SBaoquan He /* zero-fill the remaining dirty or free regions */ 41214c91c07cSLorenzo Stoakes return count - remains + zero_iter(iter, remains); 41224c91c07cSLorenzo Stoakes finished: 41234c91c07cSLorenzo Stoakes /* We couldn't copy/zero everything */ 41244c91c07cSLorenzo Stoakes spin_unlock(&vb->lock); 41254c91c07cSLorenzo Stoakes return count - remains; 412606c89946SBaoquan He } 412706c89946SBaoquan He 4128d0107eb0SKAMEZAWA Hiroyuki /** 41294c91c07cSLorenzo Stoakes * vread_iter() - read vmalloc area in a safe way to an iterator. 41304c91c07cSLorenzo Stoakes * @iter: the iterator to which data should be written. 4131d0107eb0SKAMEZAWA Hiroyuki * @addr: vm address. 4132d0107eb0SKAMEZAWA Hiroyuki * @count: number of bytes to be read. 4133d0107eb0SKAMEZAWA Hiroyuki * 4134d0107eb0SKAMEZAWA Hiroyuki * This function checks that @addr is a valid vmalloc'ed area, and 4135d0107eb0SKAMEZAWA Hiroyuki * copies data from that area to the given iterator. If the given memory range 4136d0107eb0SKAMEZAWA Hiroyuki * of [addr...addr+count) includes some valid address, data is copied to 4137d0107eb0SKAMEZAWA Hiroyuki * @iter. If there are memory holes, they'll be zero-filled. 4138d0107eb0SKAMEZAWA Hiroyuki * An IOREMAP area is treated as a memory hole and no copy is done. 4139d0107eb0SKAMEZAWA Hiroyuki * 4140d0107eb0SKAMEZAWA Hiroyuki * If [addr...addr+count) doesn't include any intersection with a live 4141a8e5202dSCong Wang * vm_struct area, 0 is returned. 4142d0107eb0SKAMEZAWA Hiroyuki * 4143d0107eb0SKAMEZAWA Hiroyuki * Note: in usual operation, vread_iter() is never necessary because the caller 4144d0107eb0SKAMEZAWA Hiroyuki * should know the vmalloc() area is valid and can use memcpy(). 4145d0107eb0SKAMEZAWA Hiroyuki * This is for routines which have to access the vmalloc area without 4146bbcd53c9SDavid Hildenbrand * any prior information, such as /proc/kcore. 4147a862f68aSMike Rapoport * 4148a862f68aSMike Rapoport * Return: number of bytes for which @addr and @iter should be advanced 4149a862f68aSMike Rapoport * (same number as @count) or %0 if [addr...addr+count) doesn't 4150a862f68aSMike Rapoport * include any intersection with a valid vmalloc area 4151d0107eb0SKAMEZAWA Hiroyuki */ 41524c91c07cSLorenzo Stoakes long vread_iter(struct iov_iter *iter, const char *addr, size_t count) 41531da177e4SLinus Torvalds { 4154d0936029SUladzislau Rezki (Sony) struct vmap_node *vn; 4155e81ce85fSJoonsoo Kim struct vmap_area *va; 4156e81ce85fSJoonsoo Kim struct vm_struct *vm; 41574c91c07cSLorenzo Stoakes char *vaddr; 41584c91c07cSLorenzo Stoakes size_t n, size, flags, remains; 415953becf32SUladzislau Rezki (Sony) unsigned long next; 41601da177e4SLinus Torvalds 41614aff1dc4SAndrey Konovalov addr = kasan_reset_tag(addr); 41624aff1dc4SAndrey Konovalov 41631da177e4SLinus Torvalds /* Don't allow overflow */ 41641da177e4SLinus Torvalds if ((unsigned long) addr + count < count) 41651da177e4SLinus Torvalds count = -(unsigned long) addr; 41661da177e4SLinus Torvalds 41674c91c07cSLorenzo Stoakes remains = count; 41684c91c07cSLorenzo Stoakes 416953becf32SUladzislau Rezki (Sony) vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va); 417053becf32SUladzislau Rezki (Sony) if (!vn) 41714c91c07cSLorenzo Stoakes goto finished_zero; 4172f181234aSChen Wandun 4173f181234aSChen Wandun /* no intersection with a live vmap_area */ 41744c91c07cSLorenzo Stoakes if ((unsigned long)addr + remains <= va->va_start) 41754c91c07cSLorenzo Stoakes goto finished_zero; 4176f181234aSChen Wandun 417753becf32SUladzislau Rezki (Sony) do { 41784c91c07cSLorenzo Stoakes size_t copied; 41794c91c07cSLorenzo Stoakes 41804c91c07cSLorenzo Stoakes if (remains == 0) 41814c91c07cSLorenzo Stoakes goto finished; 4182e81ce85fSJoonsoo Kim 418306c89946SBaoquan He vm = va->vm; 418406c89946SBaoquan He flags = va->flags & VMAP_FLAGS_MASK; 418506c89946SBaoquan He /* 418606c89946SBaoquan He * VMAP_BLOCK indicates a sub-type of vm_map_ram area; it needs 418706c89946SBaoquan He * to be set together with VMAP_RAM. 418806c89946SBaoquan He */ 418906c89946SBaoquan He WARN_ON(flags == VMAP_BLOCK); 419006c89946SBaoquan He 419106c89946SBaoquan He if (!vm && !flags) 419253becf32SUladzislau Rezki (Sony) goto next_va; 4193e81ce85fSJoonsoo Kim 419430a7a9b1SBaoquan He if (vm && (vm->flags & VM_UNINITIALIZED)) 419553becf32SUladzislau Rezki (Sony) goto next_va; 41964c91c07cSLorenzo Stoakes 419730a7a9b1SBaoquan He /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ 419830a7a9b1SBaoquan He smp_rmb(); 419930a7a9b1SBaoquan He 420006c89946SBaoquan He vaddr = (char *) va->va_start; 420106c89946SBaoquan He size = vm ?
get_vm_area_size(vm) : va_size(va); 420206c89946SBaoquan He 420306c89946SBaoquan He if (addr >= vaddr + size) 420453becf32SUladzislau Rezki (Sony) goto next_va; 42054c91c07cSLorenzo Stoakes 42064c91c07cSLorenzo Stoakes if (addr < vaddr) { 42074c91c07cSLorenzo Stoakes size_t to_zero = min_t(size_t, vaddr - addr, remains); 42084c91c07cSLorenzo Stoakes size_t zeroed = zero_iter(iter, to_zero); 42094c91c07cSLorenzo Stoakes 42104c91c07cSLorenzo Stoakes addr += zeroed; 42114c91c07cSLorenzo Stoakes remains -= zeroed; 42124c91c07cSLorenzo Stoakes 42134c91c07cSLorenzo Stoakes if (remains == 0 || zeroed != to_zero) 42141da177e4SLinus Torvalds goto finished; 42151da177e4SLinus Torvalds } 42164c91c07cSLorenzo Stoakes 421706c89946SBaoquan He n = vaddr + size - addr; 42184c91c07cSLorenzo Stoakes if (n > remains) 42194c91c07cSLorenzo Stoakes n = remains; 422006c89946SBaoquan He 422106c89946SBaoquan He if (flags & VMAP_RAM) 42224c91c07cSLorenzo Stoakes copied = vmap_ram_vread_iter(iter, addr, n, flags); 4223ca6c2ce1SBaoquan He else if (!(vm && (vm->flags & VM_IOREMAP))) 42244c91c07cSLorenzo Stoakes copied = aligned_vread_iter(iter, addr, n); 4225d0107eb0SKAMEZAWA Hiroyuki else /* IOREMAP area is treated as memory hole */ 42264c91c07cSLorenzo Stoakes copied = zero_iter(iter, n); 42274c91c07cSLorenzo Stoakes 42284c91c07cSLorenzo Stoakes addr += copied; 42294c91c07cSLorenzo Stoakes remains -= copied; 42304c91c07cSLorenzo Stoakes 42314c91c07cSLorenzo Stoakes if (copied != n) 42324c91c07cSLorenzo Stoakes goto finished; 423353becf32SUladzislau Rezki (Sony) 423453becf32SUladzislau Rezki (Sony) next_va: 423553becf32SUladzislau Rezki (Sony) next = va->va_end; 423653becf32SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 423753becf32SUladzislau Rezki (Sony) } while ((vn = find_vmap_area_exceed_addr_lock(next, &va))); 42384c91c07cSLorenzo Stoakes 42394c91c07cSLorenzo Stoakes finished_zero: 424053becf32SUladzislau Rezki (Sony) if (vn) 4241d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 424253becf32SUladzislau Rezki (Sony) 42434c91c07cSLorenzo Stoakes /* zero-fill memory holes */ 42444c91c07cSLorenzo Stoakes return count - remains + zero_iter(iter, remains); 42451da177e4SLinus Torvalds finished: 42464c91c07cSLorenzo Stoakes /* Nothing remains, or we couldn't copy/zero everything. */ 424753becf32SUladzislau Rezki (Sony) if (vn) 4248d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 4249d0107eb0SKAMEZAWA Hiroyuki 42504c91c07cSLorenzo Stoakes return count - remains; 42511da177e4SLinus Torvalds } 42521da177e4SLinus Torvalds 4253d0107eb0SKAMEZAWA Hiroyuki /** 4254e69e9d4aSHATAYAMA Daisuke * remap_vmalloc_range_partial - map vmalloc pages to userspace 4255e69e9d4aSHATAYAMA Daisuke * @vma: vma to cover 4256e69e9d4aSHATAYAMA Daisuke * @uaddr: target user address to start at 4257e69e9d4aSHATAYAMA Daisuke * @kaddr: virtual address of vmalloc kernel memory 4258bdebd6a2SJann Horn * @pgoff: offset from @kaddr to start at 4259e69e9d4aSHATAYAMA Daisuke * @size: size of map area 4260e69e9d4aSHATAYAMA Daisuke * 4261e69e9d4aSHATAYAMA Daisuke * Returns: 0 for success, -Exxx on failure 4262e69e9d4aSHATAYAMA Daisuke * 4263e69e9d4aSHATAYAMA Daisuke * This function checks that @kaddr is a valid vmalloc'ed area, 4264e69e9d4aSHATAYAMA Daisuke * and that it is big enough to cover the range starting at 4265e69e9d4aSHATAYAMA Daisuke * @uaddr in @vma. Will return failure if those criteria aren't 4266e69e9d4aSHATAYAMA Daisuke * met.
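 *
 * A hedged sketch of a typical caller (hypothetical driver code, not
 * part of this file; it assumes "my_buf" was allocated with
 * vmalloc_user() so the backing area carries VM_USERMAP):
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range_partial(vma, vma->vm_start,
 *						   my_buf, vma->vm_pgoff,
 *						   vma->vm_end - vma->vm_start);
 *	}
 *
 * Most drivers can use the remap_vmalloc_range() wrapper below instead.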
4267e69e9d4aSHATAYAMA Daisuke * 4268e69e9d4aSHATAYAMA Daisuke * Similar to remap_pfn_range() (see mm/memory.c) 4269e69e9d4aSHATAYAMA Daisuke */ 4270e69e9d4aSHATAYAMA Daisuke int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, 4271bdebd6a2SJann Horn void *kaddr, unsigned long pgoff, 4272bdebd6a2SJann Horn unsigned long size) 4273e69e9d4aSHATAYAMA Daisuke { 4274e69e9d4aSHATAYAMA Daisuke struct vm_struct *area; 4275bdebd6a2SJann Horn unsigned long off; 4276bdebd6a2SJann Horn unsigned long end_index; 4277bdebd6a2SJann Horn 4278bdebd6a2SJann Horn if (check_shl_overflow(pgoff, PAGE_SHIFT, &off)) 4279bdebd6a2SJann Horn return -EINVAL; 4280e69e9d4aSHATAYAMA Daisuke 4281e69e9d4aSHATAYAMA Daisuke size = PAGE_ALIGN(size); 4282e69e9d4aSHATAYAMA Daisuke 4283e69e9d4aSHATAYAMA Daisuke if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) 4284e69e9d4aSHATAYAMA Daisuke return -EINVAL; 4285e69e9d4aSHATAYAMA Daisuke 4286e69e9d4aSHATAYAMA Daisuke area = find_vm_area(kaddr); 4287e69e9d4aSHATAYAMA Daisuke if (!area) 4288e69e9d4aSHATAYAMA Daisuke return -EINVAL; 4289e69e9d4aSHATAYAMA Daisuke 4290fe9041c2SChristoph Hellwig if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) 4291e69e9d4aSHATAYAMA Daisuke return -EINVAL; 4292e69e9d4aSHATAYAMA Daisuke 4293bdebd6a2SJann Horn if (check_add_overflow(size, off, &end_index) || 4294bdebd6a2SJann Horn end_index > get_vm_area_size(area)) 4295e69e9d4aSHATAYAMA Daisuke return -EINVAL; 4296bdebd6a2SJann Horn kaddr += off; 4297e69e9d4aSHATAYAMA Daisuke 4298e69e9d4aSHATAYAMA Daisuke do { 4299e69e9d4aSHATAYAMA Daisuke struct page *page = vmalloc_to_page(kaddr); 4300e69e9d4aSHATAYAMA Daisuke int ret; 4301e69e9d4aSHATAYAMA Daisuke 4302e69e9d4aSHATAYAMA Daisuke ret = vm_insert_page(vma, uaddr, page); 4303e69e9d4aSHATAYAMA Daisuke if (ret) 4304e69e9d4aSHATAYAMA Daisuke return ret; 4305e69e9d4aSHATAYAMA Daisuke 4306e69e9d4aSHATAYAMA Daisuke uaddr += PAGE_SIZE; 4307e69e9d4aSHATAYAMA Daisuke kaddr += PAGE_SIZE; 4308e69e9d4aSHATAYAMA Daisuke size -= PAGE_SIZE; 4309e69e9d4aSHATAYAMA Daisuke } while (size > 0); 4310e69e9d4aSHATAYAMA Daisuke 43111c71222eSSuren Baghdasaryan vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); 4312e69e9d4aSHATAYAMA Daisuke 4313e69e9d4aSHATAYAMA Daisuke return 0; 4314e69e9d4aSHATAYAMA Daisuke } 4315e69e9d4aSHATAYAMA Daisuke 4316e69e9d4aSHATAYAMA Daisuke /** 431783342314SNick Piggin * remap_vmalloc_range - map vmalloc pages to userspace 431883342314SNick Piggin * @vma: vma to cover (map full range of vma) 431983342314SNick Piggin * @addr: vmalloc memory 432083342314SNick Piggin * @pgoff: number of pages into addr before first page to map 43217682486bSRandy Dunlap * 43227682486bSRandy Dunlap * Returns: 0 for success, -Exxx on failure 432383342314SNick Piggin * 432483342314SNick Piggin * This function checks that addr is a valid vmalloc'ed area, and 432583342314SNick Piggin * that it is big enough to cover the vma. Will return failure if 432683342314SNick Piggin * those criteria aren't met. 432783342314SNick Piggin * 432872fd4a35SRobert P. J. Day
* Similar to remap_pfn_range() (see mm/memory.c) 432983342314SNick Piggin */ 433083342314SNick Piggin int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 433183342314SNick Piggin unsigned long pgoff) 433283342314SNick Piggin { 4333e69e9d4aSHATAYAMA Daisuke return remap_vmalloc_range_partial(vma, vma->vm_start, 4334bdebd6a2SJann Horn addr, pgoff, 4335e69e9d4aSHATAYAMA Daisuke vma->vm_end - vma->vm_start); 433683342314SNick Piggin } 433783342314SNick Piggin EXPORT_SYMBOL(remap_vmalloc_range); 433883342314SNick Piggin 43395f4352fbSJeremy Fitzhardinge void free_vm_area(struct vm_struct *area) 43405f4352fbSJeremy Fitzhardinge { 43415f4352fbSJeremy Fitzhardinge struct vm_struct *ret; 43425f4352fbSJeremy Fitzhardinge ret = remove_vm_area(area->addr); 43435f4352fbSJeremy Fitzhardinge BUG_ON(ret != area); 43445f4352fbSJeremy Fitzhardinge kfree(area); 43455f4352fbSJeremy Fitzhardinge } 43465f4352fbSJeremy Fitzhardinge EXPORT_SYMBOL_GPL(free_vm_area); 4347a10aa579SChristoph Lameter 43484f8b02b4STejun Heo #ifdef CONFIG_SMP 4349ca23e405STejun Heo static struct vmap_area *node_to_va(struct rb_node *n) 4350ca23e405STejun Heo { 43514583e773SGeliang Tang return rb_entry_safe(n, struct vmap_area, rb_node); 4352ca23e405STejun Heo } 4353ca23e405STejun Heo 4354ca23e405STejun Heo /** 435568ad4a33SUladzislau Rezki (Sony) * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to 435668ad4a33SUladzislau Rezki (Sony) * @addr: target address 4357ca23e405STejun Heo * 435868ad4a33SUladzislau Rezki (Sony) * Returns: the vmap_area if it is found. If there is no such area, 435968ad4a33SUladzislau Rezki (Sony) * the first highest (in reverse order) vmap_area is returned, 436068ad4a33SUladzislau Rezki (Sony) * i.e. va->va_start < addr && va->va_end < addr, or NULL 436168ad4a33SUladzislau Rezki (Sony) * if there are no areas before @addr. 4362ca23e405STejun Heo */ 436368ad4a33SUladzislau Rezki (Sony) static struct vmap_area * 436468ad4a33SUladzislau Rezki (Sony) pvm_find_va_enclose_addr(unsigned long addr) 4365ca23e405STejun Heo { 436668ad4a33SUladzislau Rezki (Sony) struct vmap_area *va, *tmp; 436768ad4a33SUladzislau Rezki (Sony) struct rb_node *n; 436868ad4a33SUladzislau Rezki (Sony) 436968ad4a33SUladzislau Rezki (Sony) n = free_vmap_area_root.rb_node; 437068ad4a33SUladzislau Rezki (Sony) va = NULL; 4371ca23e405STejun Heo 4372ca23e405STejun Heo while (n) { 437368ad4a33SUladzislau Rezki (Sony) tmp = rb_entry(n, struct vmap_area, rb_node); 437468ad4a33SUladzislau Rezki (Sony) if (tmp->va_start <= addr) { 437568ad4a33SUladzislau Rezki (Sony) va = tmp; 437668ad4a33SUladzislau Rezki (Sony) if (tmp->va_end >= addr) 4377ca23e405STejun Heo break; 4378ca23e405STejun Heo 437968ad4a33SUladzislau Rezki (Sony) n = n->rb_right; 4380ca23e405STejun Heo } else { 438168ad4a33SUladzislau Rezki (Sony) n = n->rb_left; 4382ca23e405STejun Heo } 438368ad4a33SUladzislau Rezki (Sony) } 438468ad4a33SUladzislau Rezki (Sony) 438568ad4a33SUladzislau Rezki (Sony) return va; 4386ca23e405STejun Heo } 4387ca23e405STejun Heo 4388ca23e405STejun Heo /** 438968ad4a33SUladzislau Rezki (Sony) * pvm_determine_end_from_reverse - find the highest aligned address 439068ad4a33SUladzislau Rezki (Sony) * of a free block below VMALLOC_END 439168ad4a33SUladzislau Rezki (Sony) * @va: 439268ad4a33SUladzislau Rezki (Sony) * in - the VA we start the search from (reverse order); 439368ad4a33SUladzislau Rezki (Sony) * out - the VA with the highest aligned end address.
4394799fa85dSAlex Shi * @align: alignment for required highest address 4395ca23e405STejun Heo * 439668ad4a33SUladzislau Rezki (Sony) * Returns: determined end address within vmap_area 4397ca23e405STejun Heo */ 439868ad4a33SUladzislau Rezki (Sony) static unsigned long 439968ad4a33SUladzislau Rezki (Sony) pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) 4400ca23e405STejun Heo { 440168ad4a33SUladzislau Rezki (Sony) unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 4402ca23e405STejun Heo unsigned long addr; 4403ca23e405STejun Heo 440468ad4a33SUladzislau Rezki (Sony) if (likely(*va)) { 440568ad4a33SUladzislau Rezki (Sony) list_for_each_entry_from_reverse((*va), 440668ad4a33SUladzislau Rezki (Sony) &free_vmap_area_list, list) { 440768ad4a33SUladzislau Rezki (Sony) addr = min((*va)->va_end & ~(align - 1), vmalloc_end); 440868ad4a33SUladzislau Rezki (Sony) if ((*va)->va_start < addr) 440968ad4a33SUladzislau Rezki (Sony) return addr; 441068ad4a33SUladzislau Rezki (Sony) } 4411ca23e405STejun Heo } 4412ca23e405STejun Heo 441368ad4a33SUladzislau Rezki (Sony) return 0; 4414ca23e405STejun Heo } 4415ca23e405STejun Heo 4416ca23e405STejun Heo /** 4417ca23e405STejun Heo * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator 4418ca23e405STejun Heo * @offsets: array containing offset of each area 4419ca23e405STejun Heo * @sizes: array containing size of each area 4420ca23e405STejun Heo * @nr_vms: the number of areas to allocate 4421ca23e405STejun Heo * @align: alignment, all entries in @offsets and @sizes must be aligned to this 4422ca23e405STejun Heo * 4423ca23e405STejun Heo * Returns: kmalloc'd vm_struct pointer array pointing to allocated 4424ca23e405STejun Heo * vm_structs on success, %NULL on failure 4425ca23e405STejun Heo * 4426ca23e405STejun Heo * The percpu allocator wants to use congruent vm areas so that it can 4427ca23e405STejun Heo * maintain the offsets among percpu areas. This function allocates 4428ec3f64fcSDavid Rientjes * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to 4429ec3f64fcSDavid Rientjes * be scattered pretty far, with the distance between two areas easily 4430ec3f64fcSDavid Rientjes * going up to gigabytes. To avoid interacting with regular vmallocs, these 4431ec3f64fcSDavid Rientjes * areas are allocated from the top. 4432ca23e405STejun Heo * 4433ca23e405STejun Heo * Despite its complicated look, this allocator is rather simple. It 443468ad4a33SUladzislau Rezki (Sony) * does everything top-down and scans free blocks from the end looking 443568ad4a33SUladzislau Rezki (Sony) * for a matching base. While scanning, if any of the areas does not fit, 443668ad4a33SUladzislau Rezki (Sony) * the base address is pulled down to fit the area. Scanning is repeated until 443768ad4a33SUladzislau Rezki (Sony) * all the areas fit and then all necessary data structures are inserted 443868ad4a33SUladzislau Rezki (Sony) * and the result is returned.
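 *
 * A minimal sketch of a call (illustrative values only, not taken from
 * the percpu code): two congruent areas, the second one sitting 64KB
 * above the first, both page aligned:
 *
 *	static const unsigned long offs[] = { 0, SZ_64K };
 *	static const size_t sizes[] = { SZ_32K, SZ_32K };
 *	struct vm_struct **vms;
 *
 *	vms = pcpu_get_vm_areas(offs, sizes, 2, PAGE_SIZE);
 *	if (vms) {
 *		WARN_ON((unsigned long)vms[1]->addr -
 *			(unsigned long)vms[0]->addr != SZ_64K);
 *		pcpu_free_vm_areas(vms, 2);
 *	}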
4439ca23e405STejun Heo */ 4440ca23e405STejun Heo struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 4441ca23e405STejun Heo const size_t *sizes, int nr_vms, 4442ec3f64fcSDavid Rientjes size_t align) 4443ca23e405STejun Heo { 4444ca23e405STejun Heo const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); 4445ca23e405STejun Heo const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 444668ad4a33SUladzislau Rezki (Sony) struct vmap_area **vas, *va; 4447ca23e405STejun Heo struct vm_struct **vms; 4448ca23e405STejun Heo int area, area2, last_area, term_area; 4449253a496dSDaniel Axtens unsigned long base, start, size, end, last_end, orig_start, orig_end; 4450ca23e405STejun Heo bool purged = false; 4451ca23e405STejun Heo 4452ca23e405STejun Heo /* verify parameters and allocate data structures */ 4453891c49abSAlexander Kuleshov BUG_ON(offset_in_page(align) || !is_power_of_2(align)); 4454ca23e405STejun Heo for (last_area = 0, area = 0; area < nr_vms; area++) { 4455ca23e405STejun Heo start = offsets[area]; 4456ca23e405STejun Heo end = start + sizes[area]; 4457ca23e405STejun Heo 4458ca23e405STejun Heo /* is everything aligned properly? */ 4459ca23e405STejun Heo BUG_ON(!IS_ALIGNED(offsets[area], align)); 4460ca23e405STejun Heo BUG_ON(!IS_ALIGNED(sizes[area], align)); 4461ca23e405STejun Heo 4462ca23e405STejun Heo /* detect the area with the highest address */ 4463ca23e405STejun Heo if (start > offsets[last_area]) 4464ca23e405STejun Heo last_area = area; 4465ca23e405STejun Heo 4466c568da28SWei Yang for (area2 = area + 1; area2 < nr_vms; area2++) { 4467ca23e405STejun Heo unsigned long start2 = offsets[area2]; 4468ca23e405STejun Heo unsigned long end2 = start2 + sizes[area2]; 4469ca23e405STejun Heo 4470c568da28SWei Yang BUG_ON(start2 < end && start < end2); 4471ca23e405STejun Heo } 4472ca23e405STejun Heo } 4473ca23e405STejun Heo last_end = offsets[last_area] + sizes[last_area]; 4474ca23e405STejun Heo 4475ca23e405STejun Heo if (vmalloc_end - vmalloc_start < last_end) { 4476ca23e405STejun Heo WARN_ON(true); 4477ca23e405STejun Heo return NULL; 4478ca23e405STejun Heo } 4479ca23e405STejun Heo 44804d67d860SThomas Meyer vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); 44814d67d860SThomas Meyer vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); 4482ca23e405STejun Heo if (!vas || !vms) 4483f1db7afdSKautuk Consul goto err_free2; 4484ca23e405STejun Heo 4485ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 448668ad4a33SUladzislau Rezki (Sony) vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); 4487ec3f64fcSDavid Rientjes vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); 4488ca23e405STejun Heo if (!vas[area] || !vms[area]) 4489ca23e405STejun Heo goto err_free; 4490ca23e405STejun Heo } 4491ca23e405STejun Heo retry: 4492e36176beSUladzislau Rezki (Sony) spin_lock(&free_vmap_area_lock); 4493ca23e405STejun Heo 4494ca23e405STejun Heo /* start scanning - we scan from the top, begin with the last area */ 4495ca23e405STejun Heo area = term_area = last_area; 4496ca23e405STejun Heo start = offsets[area]; 4497ca23e405STejun Heo end = start + sizes[area]; 4498ca23e405STejun Heo 449968ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(vmalloc_end); 450068ad4a33SUladzislau Rezki (Sony) base = pvm_determine_end_from_reverse(&va, align) - end; 4501ca23e405STejun Heo 4502ca23e405STejun Heo while (true) { 4503ca23e405STejun Heo /* 4504ca23e405STejun Heo * base might have underflowed, add last_end before 4505ca23e405STejun Heo * comparing. 
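		 * (E.g. if the scan could not supply a high enough end address,
		 * base wrapped around to a value just below ULONG_MAX; adding
		 * last_end makes the sum wrap as well, so it drops below
		 * vmalloc_start + last_end and the overflow path is taken.)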
4506ca23e405STejun Heo */ 450768ad4a33SUladzislau Rezki (Sony) if (base + last_end < vmalloc_start + last_end) 450868ad4a33SUladzislau Rezki (Sony) goto overflow; 4509ca23e405STejun Heo 4510ca23e405STejun Heo /* 451168ad4a33SUladzislau Rezki (Sony) * Fitting base has not been found. 4512ca23e405STejun Heo */ 451368ad4a33SUladzislau Rezki (Sony) if (va == NULL) 451468ad4a33SUladzislau Rezki (Sony) goto overflow; 4515ca23e405STejun Heo 4516ca23e405STejun Heo /* 4517d8cc323dSQiujun Huang * If required width exceeds current VA block, move 45185336e52cSKuppuswamy Sathyanarayanan * base downwards and then recheck. 45195336e52cSKuppuswamy Sathyanarayanan */ 45205336e52cSKuppuswamy Sathyanarayanan if (base + end > va->va_end) { 45215336e52cSKuppuswamy Sathyanarayanan base = pvm_determine_end_from_reverse(&va, align) - end; 45225336e52cSKuppuswamy Sathyanarayanan term_area = area; 45235336e52cSKuppuswamy Sathyanarayanan continue; 45245336e52cSKuppuswamy Sathyanarayanan } 45255336e52cSKuppuswamy Sathyanarayanan 45265336e52cSKuppuswamy Sathyanarayanan /* 452768ad4a33SUladzislau Rezki (Sony) * If this VA does not fit, move base downwards and recheck. 4528ca23e405STejun Heo */ 45295336e52cSKuppuswamy Sathyanarayanan if (base + start < va->va_start) { 453068ad4a33SUladzislau Rezki (Sony) va = node_to_va(rb_prev(&va->rb_node)); 453168ad4a33SUladzislau Rezki (Sony) base = pvm_determine_end_from_reverse(&va, align) - end; 4532ca23e405STejun Heo term_area = area; 4533ca23e405STejun Heo continue; 4534ca23e405STejun Heo } 4535ca23e405STejun Heo 4536ca23e405STejun Heo /* 4537ca23e405STejun Heo * This area fits, move on to the previous one. If 4538ca23e405STejun Heo * the previous one is the terminal one, we're done. 4539ca23e405STejun Heo */ 4540ca23e405STejun Heo area = (area + nr_vms - 1) % nr_vms; 4541ca23e405STejun Heo if (area == term_area) 4542ca23e405STejun Heo break; 454368ad4a33SUladzislau Rezki (Sony) 4544ca23e405STejun Heo start = offsets[area]; 4545ca23e405STejun Heo end = start + sizes[area]; 454668ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(base + end); 4547ca23e405STejun Heo } 454868ad4a33SUladzislau Rezki (Sony) 4549ca23e405STejun Heo /* we've found a fitting base, insert all va's */ 4550ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 455168ad4a33SUladzislau Rezki (Sony) int ret; 4552ca23e405STejun Heo 455368ad4a33SUladzislau Rezki (Sony) start = base + offsets[area]; 455468ad4a33SUladzislau Rezki (Sony) size = sizes[area]; 455568ad4a33SUladzislau Rezki (Sony) 455668ad4a33SUladzislau Rezki (Sony) va = pvm_find_va_enclose_addr(start); 455768ad4a33SUladzislau Rezki (Sony) if (WARN_ON_ONCE(va == NULL)) 455868ad4a33SUladzislau Rezki (Sony) /* It is a BUG(), but trigger recovery instead. */ 455968ad4a33SUladzislau Rezki (Sony) goto recovery; 456068ad4a33SUladzislau Rezki (Sony) 45615b75b8e1SUladzislau Rezki (Sony) ret = va_clip(&free_vmap_area_root, 45625b75b8e1SUladzislau Rezki (Sony) &free_vmap_area_list, va, start, size); 45631b23ff80SBaoquan He if (WARN_ON_ONCE(unlikely(ret))) 456468ad4a33SUladzislau Rezki (Sony) /* It is a BUG(), but trigger recovery instead. */ 456568ad4a33SUladzislau Rezki (Sony) goto recovery; 456668ad4a33SUladzislau Rezki (Sony) 456768ad4a33SUladzislau Rezki (Sony) /* Allocated area. 
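		 * The preallocated vmap_area is reused and the clipped range
		 * [start, start + size) is assigned to it.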
*/ 456868ad4a33SUladzislau Rezki (Sony) va = vas[area]; 456968ad4a33SUladzislau Rezki (Sony) va->va_start = start; 457068ad4a33SUladzislau Rezki (Sony) va->va_end = start + size; 4571ca23e405STejun Heo } 4572ca23e405STejun Heo 4573e36176beSUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 4574ca23e405STejun Heo 4575253a496dSDaniel Axtens /* populate the kasan shadow space */ 4576253a496dSDaniel Axtens for (area = 0; area < nr_vms; area++) { 4577253a496dSDaniel Axtens if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) 4578253a496dSDaniel Axtens goto err_free_shadow; 4579253a496dSDaniel Axtens } 4580253a496dSDaniel Axtens 4581ca23e405STejun Heo /* insert all vm's */ 4582e36176beSUladzislau Rezki (Sony) for (area = 0; area < nr_vms; area++) { 4583d0936029SUladzislau Rezki (Sony) struct vmap_node *vn = addr_to_node(vas[area]->va_start); 4584e36176beSUladzislau Rezki (Sony) 4585d0936029SUladzislau Rezki (Sony) spin_lock(&vn->busy.lock); 4586d0936029SUladzislau Rezki (Sony) insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head); 4587e36176beSUladzislau Rezki (Sony) setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC, 4588ca23e405STejun Heo pcpu_get_vm_areas); 4589d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 4590e36176beSUladzislau Rezki (Sony) } 4591ca23e405STejun Heo 459219f1c3acSAndrey Konovalov /* 459319f1c3acSAndrey Konovalov * Mark allocated areas as accessible. Do it now as a best-effort 459419f1c3acSAndrey Konovalov * approach, as they can be mapped outside of vmalloc code. 459523689e91SAndrey Konovalov * With hardware tag-based KASAN, marking is skipped for 459623689e91SAndrey Konovalov * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 459719f1c3acSAndrey Konovalov */ 45981d96320fSAndrey Konovalov for (area = 0; area < nr_vms; area++) 45991d96320fSAndrey Konovalov vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr, 4600f6e39794SAndrey Konovalov vms[area]->size, KASAN_VMALLOC_PROT_NORMAL); 46011d96320fSAndrey Konovalov 4602ca23e405STejun Heo kfree(vas); 4603ca23e405STejun Heo return vms; 4604ca23e405STejun Heo 460568ad4a33SUladzislau Rezki (Sony) recovery: 4606e36176beSUladzislau Rezki (Sony) /* 4607e36176beSUladzislau Rezki (Sony) * Remove previously allocated areas. There is no 4608e36176beSUladzislau Rezki (Sony) * need to remove these areas from the busy tree, 4609e36176beSUladzislau Rezki (Sony) * because they are inserted only on the final step 4610e36176beSUladzislau Rezki (Sony) * and only when pcpu_get_vm_areas() succeeds.
4611e36176beSUladzislau Rezki (Sony) */ 461268ad4a33SUladzislau Rezki (Sony) while (area--) { 4613253a496dSDaniel Axtens orig_start = vas[area]->va_start; 4614253a496dSDaniel Axtens orig_end = vas[area]->va_end; 461596e2db45SUladzislau Rezki (Sony) va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, 46163c5c3cfbSDaniel Axtens &free_vmap_area_list); 46179c801f61SUladzislau Rezki (Sony) if (va) 4618253a496dSDaniel Axtens kasan_release_vmalloc(orig_start, orig_end, 4619253a496dSDaniel Axtens va->va_start, va->va_end); 462068ad4a33SUladzislau Rezki (Sony) vas[area] = NULL; 462168ad4a33SUladzislau Rezki (Sony) } 462268ad4a33SUladzislau Rezki (Sony) 462368ad4a33SUladzislau Rezki (Sony) overflow: 4624e36176beSUladzislau Rezki (Sony) spin_unlock(&free_vmap_area_lock); 462568ad4a33SUladzislau Rezki (Sony) if (!purged) { 462677e50af0SThomas Gleixner reclaim_and_purge_vmap_areas(); 462768ad4a33SUladzislau Rezki (Sony) purged = true; 462868ad4a33SUladzislau Rezki (Sony) 462968ad4a33SUladzislau Rezki (Sony) /* Before "retry", check if we recover. */ 463068ad4a33SUladzislau Rezki (Sony) for (area = 0; area < nr_vms; area++) { 463168ad4a33SUladzislau Rezki (Sony) if (vas[area]) 463268ad4a33SUladzislau Rezki (Sony) continue; 463368ad4a33SUladzislau Rezki (Sony) 463468ad4a33SUladzislau Rezki (Sony) vas[area] = kmem_cache_zalloc( 463568ad4a33SUladzislau Rezki (Sony) vmap_area_cachep, GFP_KERNEL); 463668ad4a33SUladzislau Rezki (Sony) if (!vas[area]) 463768ad4a33SUladzislau Rezki (Sony) goto err_free; 463868ad4a33SUladzislau Rezki (Sony) } 463968ad4a33SUladzislau Rezki (Sony) 464068ad4a33SUladzislau Rezki (Sony) goto retry; 464168ad4a33SUladzislau Rezki (Sony) } 464268ad4a33SUladzislau Rezki (Sony) 4643ca23e405STejun Heo err_free: 4644ca23e405STejun Heo for (area = 0; area < nr_vms; area++) { 464568ad4a33SUladzislau Rezki (Sony) if (vas[area]) 464668ad4a33SUladzislau Rezki (Sony) kmem_cache_free(vmap_area_cachep, vas[area]); 464768ad4a33SUladzislau Rezki (Sony) 4648ca23e405STejun Heo kfree(vms[area]); 4649ca23e405STejun Heo } 4650f1db7afdSKautuk Consul err_free2: 4651ca23e405STejun Heo kfree(vas); 4652ca23e405STejun Heo kfree(vms); 4653ca23e405STejun Heo return NULL; 4654253a496dSDaniel Axtens 4655253a496dSDaniel Axtens err_free_shadow: 4656253a496dSDaniel Axtens spin_lock(&free_vmap_area_lock); 4657253a496dSDaniel Axtens /* 4658253a496dSDaniel Axtens * We release all the vmalloc shadows, even the ones for regions that 4659253a496dSDaniel Axtens * hadn't been successfully added. This relies on kasan_release_vmalloc 4660253a496dSDaniel Axtens * being able to tolerate this case. 
4661253a496dSDaniel Axtens */ 4662253a496dSDaniel Axtens for (area = 0; area < nr_vms; area++) { 4663253a496dSDaniel Axtens orig_start = vas[area]->va_start; 4664253a496dSDaniel Axtens orig_end = vas[area]->va_end; 466596e2db45SUladzislau Rezki (Sony) va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, 4666253a496dSDaniel Axtens &free_vmap_area_list); 46679c801f61SUladzislau Rezki (Sony) if (va) 4668253a496dSDaniel Axtens kasan_release_vmalloc(orig_start, orig_end, 4669253a496dSDaniel Axtens va->va_start, va->va_end); 4670253a496dSDaniel Axtens vas[area] = NULL; 4671253a496dSDaniel Axtens kfree(vms[area]); 4672253a496dSDaniel Axtens } 4673253a496dSDaniel Axtens spin_unlock(&free_vmap_area_lock); 4674253a496dSDaniel Axtens kfree(vas); 4675253a496dSDaniel Axtens kfree(vms); 4676253a496dSDaniel Axtens return NULL; 4677ca23e405STejun Heo } 4678ca23e405STejun Heo 4679ca23e405STejun Heo /** 4680ca23e405STejun Heo * pcpu_free_vm_areas - free vmalloc areas for percpu allocator 4681ca23e405STejun Heo * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() 4682ca23e405STejun Heo * @nr_vms: the number of allocated areas 4683ca23e405STejun Heo * 4684ca23e405STejun Heo * Free vm_structs and the array allocated by pcpu_get_vm_areas(). 4685ca23e405STejun Heo */ 4686ca23e405STejun Heo void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) 4687ca23e405STejun Heo { 4688ca23e405STejun Heo int i; 4689ca23e405STejun Heo 4690ca23e405STejun Heo for (i = 0; i < nr_vms; i++) 4691ca23e405STejun Heo free_vm_area(vms[i]); 4692ca23e405STejun Heo kfree(vms); 4693ca23e405STejun Heo } 46944f8b02b4STejun Heo #endif /* CONFIG_SMP */ 4695a10aa579SChristoph Lameter 46965bb1bb35SPaul E. McKenney #ifdef CONFIG_PRINTK 469798f18083SPaul E. McKenney bool vmalloc_dump_obj(void *object) 469898f18083SPaul E. McKenney { 469998f18083SPaul E. McKenney void *objp = (void *)PAGE_ALIGN((unsigned long)object); 47000818e739SJoel Fernandes (Google) const void *caller; 47010818e739SJoel Fernandes (Google) struct vmap_area *va; 4702d0936029SUladzislau Rezki (Sony) struct vmap_node *vn; 47030818e739SJoel Fernandes (Google) unsigned long addr; 47040818e739SJoel Fernandes (Google) unsigned int nr_pages; 4705d0936029SUladzislau Rezki (Sony) bool success = false; 470698f18083SPaul E. McKenney 4707d0936029SUladzislau Rezki (Sony) vn = addr_to_node((unsigned long)objp); 4708d0936029SUladzislau Rezki (Sony) 4709d0936029SUladzislau Rezki (Sony) if (spin_trylock(&vn->busy.lock)) { 4710d0936029SUladzislau Rezki (Sony) va = __find_vmap_area((unsigned long)objp, &vn->busy.root); 4711d0936029SUladzislau Rezki (Sony) 4712d0936029SUladzislau Rezki (Sony) if (va && va->vm) { 4713d0936029SUladzislau Rezki (Sony) addr = (unsigned long)va->vm->addr; 4714d0936029SUladzislau Rezki (Sony) caller = va->vm->caller; 4715d0936029SUladzislau Rezki (Sony) nr_pages = va->vm->nr_pages; 4716d0936029SUladzislau Rezki (Sony) success = true; 47170818e739SJoel Fernandes (Google) } 47180818e739SJoel Fernandes (Google) 4719d0936029SUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 47200818e739SJoel Fernandes (Google) } 4721d0936029SUladzislau Rezki (Sony) 4722d0936029SUladzislau Rezki (Sony) if (success) 4723bd34dcd4SPaul E. McKenney pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n", 47240818e739SJoel Fernandes (Google) nr_pages, addr, caller); 4725d0936029SUladzislau Rezki (Sony) 4726d0936029SUladzislau Rezki (Sony) return success; 472798f18083SPaul E. McKenney } 47285bb1bb35SPaul E. McKenney #endif 472998f18083SPaul E. 
McKenney 4730a10aa579SChristoph Lameter #ifdef CONFIG_PROC_FS 4731a47a126aSEric Dumazet static void show_numa_info(struct seq_file *m, struct vm_struct *v) 4732a47a126aSEric Dumazet { 4733e5adfffcSKirill A. Shutemov if (IS_ENABLED(CONFIG_NUMA)) { 4734a47a126aSEric Dumazet unsigned int nr, *counters = m->private; 473551e50b3aSEric Dumazet unsigned int step = 1U << vm_area_page_order(v); 4736a47a126aSEric Dumazet 4737a47a126aSEric Dumazet if (!counters) 4738a47a126aSEric Dumazet return; 4739a47a126aSEric Dumazet 4740af12346cSWanpeng Li if (v->flags & VM_UNINITIALIZED) 4741af12346cSWanpeng Li return; 47427e5b528bSDmitry Vyukov /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ 47437e5b528bSDmitry Vyukov smp_rmb(); 4744af12346cSWanpeng Li 4745a47a126aSEric Dumazet memset(counters, 0, nr_node_ids * sizeof(unsigned int)); 4746a47a126aSEric Dumazet 474751e50b3aSEric Dumazet for (nr = 0; nr < v->nr_pages; nr += step) 474851e50b3aSEric Dumazet counters[page_to_nid(v->pages[nr])] += step; 4749a47a126aSEric Dumazet for_each_node_state(nr, N_HIGH_MEMORY) 4750a47a126aSEric Dumazet if (counters[nr]) 4751a47a126aSEric Dumazet seq_printf(m, " N%u=%u", nr, counters[nr]); 4752a47a126aSEric Dumazet } 4753a47a126aSEric Dumazet } 4754a47a126aSEric Dumazet 4755dd3b8353SUladzislau Rezki (Sony) static void show_purge_info(struct seq_file *m) 4756dd3b8353SUladzislau Rezki (Sony) { 4757282631cbSUladzislau Rezki (Sony) struct vmap_node *vn; 4758dd3b8353SUladzislau Rezki (Sony) struct vmap_area *va; 4759282631cbSUladzislau Rezki (Sony) int i; 4760dd3b8353SUladzislau Rezki (Sony) 4761282631cbSUladzislau Rezki (Sony) for (i = 0; i < nr_vmap_nodes; i++) { 4762282631cbSUladzislau Rezki (Sony) vn = &vmap_nodes[i]; 4763282631cbSUladzislau Rezki (Sony) 4764282631cbSUladzislau Rezki (Sony) spin_lock(&vn->lazy.lock); 4765282631cbSUladzislau Rezki (Sony) list_for_each_entry(va, &vn->lazy.head, list) { 4766dd3b8353SUladzislau Rezki (Sony) seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n", 4767dd3b8353SUladzislau Rezki (Sony) (void *)va->va_start, (void *)va->va_end, 4768dd3b8353SUladzislau Rezki (Sony) va->va_end - va->va_start); 4769dd3b8353SUladzislau Rezki (Sony) } 4770282631cbSUladzislau Rezki (Sony) spin_unlock(&vn->lazy.lock); 4771282631cbSUladzislau Rezki (Sony) } 4772dd3b8353SUladzislau Rezki (Sony) } 4773dd3b8353SUladzislau Rezki (Sony) 47748e1d743fSUladzislau Rezki (Sony) static int vmalloc_info_show(struct seq_file *m, void *p) 4775a10aa579SChristoph Lameter { 4776d0936029SUladzislau Rezki (Sony) struct vmap_node *vn; 47773f500069Szijun_hu struct vmap_area *va; 4778d4033afdSJoonsoo Kim struct vm_struct *v; 47798e1d743fSUladzislau Rezki (Sony) int i; 4780d4033afdSJoonsoo Kim 47818e1d743fSUladzislau Rezki (Sony) for (i = 0; i < nr_vmap_nodes; i++) { 47828e1d743fSUladzislau Rezki (Sony) vn = &vmap_nodes[i]; 47833f500069Szijun_hu 47848e1d743fSUladzislau Rezki (Sony) spin_lock(&vn->busy.lock); 47858e1d743fSUladzislau Rezki (Sony) list_for_each_entry(va, &vn->busy.head, list) { 4786688fcbfcSPengfei Li if (!va->vm) { 4787bba9697bSBaoquan He if (va->flags & VMAP_RAM) 4788dd3b8353SUladzislau Rezki (Sony) seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n", 478978c72746SYisheng Xie (void *)va->va_start, (void *)va->va_end, 4790dd3b8353SUladzislau Rezki (Sony) va->va_end - va->va_start); 479178c72746SYisheng Xie 47928e1d743fSUladzislau Rezki (Sony) continue; 479378c72746SYisheng Xie } 4794d4033afdSJoonsoo Kim 4795d4033afdSJoonsoo Kim v = va->vm; 4796a10aa579SChristoph Lameter 479745ec1690SKees Cook seq_printf(m, 
"0x%pK-0x%pK %7ld", 4798a10aa579SChristoph Lameter v->addr, v->addr + v->size, v->size); 4799a10aa579SChristoph Lameter 480062c70bceSJoe Perches if (v->caller) 480162c70bceSJoe Perches seq_printf(m, " %pS", v->caller); 480223016969SChristoph Lameter 4803a10aa579SChristoph Lameter if (v->nr_pages) 4804a10aa579SChristoph Lameter seq_printf(m, " pages=%d", v->nr_pages); 4805a10aa579SChristoph Lameter 4806a10aa579SChristoph Lameter if (v->phys_addr) 4807199eaa05SMiles Chen seq_printf(m, " phys=%pa", &v->phys_addr); 4808a10aa579SChristoph Lameter 4809a10aa579SChristoph Lameter if (v->flags & VM_IOREMAP) 4810f4527c90SFabian Frederick seq_puts(m, " ioremap"); 4811a10aa579SChristoph Lameter 4812a10aa579SChristoph Lameter if (v->flags & VM_ALLOC) 4813f4527c90SFabian Frederick seq_puts(m, " vmalloc"); 4814a10aa579SChristoph Lameter 4815a10aa579SChristoph Lameter if (v->flags & VM_MAP) 4816f4527c90SFabian Frederick seq_puts(m, " vmap"); 4817a10aa579SChristoph Lameter 4818a10aa579SChristoph Lameter if (v->flags & VM_USERMAP) 4819f4527c90SFabian Frederick seq_puts(m, " user"); 4820a10aa579SChristoph Lameter 4821fe9041c2SChristoph Hellwig if (v->flags & VM_DMA_COHERENT) 4822fe9041c2SChristoph Hellwig seq_puts(m, " dma-coherent"); 4823fe9041c2SChristoph Hellwig 4824244d63eeSDavid Rientjes if (is_vmalloc_addr(v->pages)) 4825f4527c90SFabian Frederick seq_puts(m, " vpages"); 4826a10aa579SChristoph Lameter 4827a47a126aSEric Dumazet show_numa_info(m, v); 4828a10aa579SChristoph Lameter seq_putc(m, '\n'); 48298e1d743fSUladzislau Rezki (Sony) } 48308e1d743fSUladzislau Rezki (Sony) spin_unlock(&vn->busy.lock); 48318e1d743fSUladzislau Rezki (Sony) } 4832dd3b8353SUladzislau Rezki (Sony) 4833dd3b8353SUladzislau Rezki (Sony) /* 483496e2db45SUladzislau Rezki (Sony) * As a final step, dump "unpurged" areas. 
4835dd3b8353SUladzislau Rezki (Sony) */ 4836dd3b8353SUladzislau Rezki (Sony) show_purge_info(m); 4837a10aa579SChristoph Lameter return 0; 4838a10aa579SChristoph Lameter } 4839a10aa579SChristoph Lameter 48405f6a6a9cSAlexey Dobriyan static int __init proc_vmalloc_init(void) 48415f6a6a9cSAlexey Dobriyan { 48428e1d743fSUladzislau Rezki (Sony) void *priv_data = NULL; 48438e1d743fSUladzislau Rezki (Sony) 4844fddda2b7SChristoph Hellwig if (IS_ENABLED(CONFIG_NUMA)) 48458e1d743fSUladzislau Rezki (Sony) priv_data = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL); 48468e1d743fSUladzislau Rezki (Sony) 48478e1d743fSUladzislau Rezki (Sony) proc_create_single_data("vmallocinfo", 48488e1d743fSUladzislau Rezki (Sony) 0400, NULL, vmalloc_info_show, priv_data); 48498e1d743fSUladzislau Rezki (Sony) 48505f6a6a9cSAlexey Dobriyan return 0; 48515f6a6a9cSAlexey Dobriyan } 48525f6a6a9cSAlexey Dobriyan module_init(proc_vmalloc_init); 4853db3808c1SJoonsoo Kim 4854a10aa579SChristoph Lameter #endif 4855208162f4SChristoph Hellwig 4856d0936029SUladzislau Rezki (Sony) static void __init vmap_init_free_space(void) 48577fa8cee0SUladzislau Rezki (Sony) { 48587fa8cee0SUladzislau Rezki (Sony) unsigned long vmap_start = 1; 48597fa8cee0SUladzislau Rezki (Sony) const unsigned long vmap_end = ULONG_MAX; 4860d0936029SUladzislau Rezki (Sony) struct vmap_area *free; 4861d0936029SUladzislau Rezki (Sony) struct vm_struct *busy; 48627fa8cee0SUladzislau Rezki (Sony) 48637fa8cee0SUladzislau Rezki (Sony) /* 48647fa8cee0SUladzislau Rezki (Sony) * B F B B B F 48657fa8cee0SUladzislau Rezki (Sony) * -|-----|.....|-----|-----|-----|.....|- 48667fa8cee0SUladzislau Rezki (Sony) * | The KVA space | 48677fa8cee0SUladzislau Rezki (Sony) * |<--------------------------------->| 48687fa8cee0SUladzislau Rezki (Sony) */ 4869d0936029SUladzislau Rezki (Sony) for (busy = vmlist; busy; busy = busy->next) { 4870d0936029SUladzislau Rezki (Sony) if ((unsigned long) busy->addr - vmap_start > 0) { 48717fa8cee0SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 48727fa8cee0SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) { 48737fa8cee0SUladzislau Rezki (Sony) free->va_start = vmap_start; 4874d0936029SUladzislau Rezki (Sony) free->va_end = (unsigned long) busy->addr; 48757fa8cee0SUladzislau Rezki (Sony) 48767fa8cee0SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL, 48777fa8cee0SUladzislau Rezki (Sony) &free_vmap_area_root, 48787fa8cee0SUladzislau Rezki (Sony) &free_vmap_area_list); 48797fa8cee0SUladzislau Rezki (Sony) } 48807fa8cee0SUladzislau Rezki (Sony) } 48817fa8cee0SUladzislau Rezki (Sony) 4882d0936029SUladzislau Rezki (Sony) vmap_start = (unsigned long) busy->addr + busy->size; 48837fa8cee0SUladzislau Rezki (Sony) } 48847fa8cee0SUladzislau Rezki (Sony) 48857fa8cee0SUladzislau Rezki (Sony) if (vmap_end - vmap_start > 0) { 48867fa8cee0SUladzislau Rezki (Sony) free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 48877fa8cee0SUladzislau Rezki (Sony) if (!WARN_ON_ONCE(!free)) { 48887fa8cee0SUladzislau Rezki (Sony) free->va_start = vmap_start; 48897fa8cee0SUladzislau Rezki (Sony) free->va_end = vmap_end; 48907fa8cee0SUladzislau Rezki (Sony) 48917fa8cee0SUladzislau Rezki (Sony) insert_vmap_area_augment(free, NULL, 48927fa8cee0SUladzislau Rezki (Sony) &free_vmap_area_root, 48937fa8cee0SUladzislau Rezki (Sony) &free_vmap_area_list); 48947fa8cee0SUladzislau Rezki (Sony) } 48957fa8cee0SUladzislau Rezki (Sony) } 48967fa8cee0SUladzislau Rezki (Sony) } 48977fa8cee0SUladzislau Rezki (Sony) 4898d0936029SUladzislau Rezki 
(Sony) static void vmap_init_nodes(void) 4899d0936029SUladzislau Rezki (Sony) { 4900d0936029SUladzislau Rezki (Sony) struct vmap_node *vn; 49018f33a2ffSUladzislau Rezki (Sony) int i, n; 4902d0936029SUladzislau Rezki (Sony) 49038f33a2ffSUladzislau Rezki (Sony) #if BITS_PER_LONG == 64 4904*15e02a39SUladzislau Rezki (Sony) /* 4905*15e02a39SUladzislau Rezki (Sony) * A high threshold of max nodes is fixed and bound to 128, 4906*15e02a39SUladzislau Rezki (Sony) * thus the scale factor is 1 for systems where the number of 4907*15e02a39SUladzislau Rezki (Sony) * cores is less than or equal to the specified threshold. 4908*15e02a39SUladzislau Rezki (Sony) * 4909*15e02a39SUladzislau Rezki (Sony) * As for NUMA awareness: for bigger systems, for example 4910*15e02a39SUladzislau Rezki (Sony) * multi-socket NUMA machines, where we can end up with thousands 4911*15e02a39SUladzislau Rezki (Sony) * of cores in total, "sub-numa-clustering" should be added. 4912*15e02a39SUladzislau Rezki (Sony) * 4913*15e02a39SUladzislau Rezki (Sony) * In this case a NUMA domain is considered as a single entity 4914*15e02a39SUladzislau Rezki (Sony) * with dedicated sub-nodes in it, each describing one group or 4915*15e02a39SUladzislau Rezki (Sony) * set of cores. Therefore per-domain purging is supposed to 4916*15e02a39SUladzislau Rezki (Sony) * be added, as well as per-domain balancing. 4917*15e02a39SUladzislau Rezki (Sony) */ 49188f33a2ffSUladzislau Rezki (Sony) n = clamp_t(unsigned int, num_possible_cpus(), 1, 128); 49198f33a2ffSUladzislau Rezki (Sony) 49208f33a2ffSUladzislau Rezki (Sony) if (n > 1) { 49218f33a2ffSUladzislau Rezki (Sony) vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN); 49228f33a2ffSUladzislau Rezki (Sony) if (vn) { 49238f33a2ffSUladzislau Rezki (Sony) /* Node partition is 16 pages. */ 49248f33a2ffSUladzislau Rezki (Sony) vmap_zone_size = (1 << 4) * PAGE_SIZE; 49258f33a2ffSUladzislau Rezki (Sony) nr_vmap_nodes = n; 49268f33a2ffSUladzislau Rezki (Sony) vmap_nodes = vn; 49278f33a2ffSUladzislau Rezki (Sony) } else { 49288f33a2ffSUladzislau Rezki (Sony) pr_err("Failed to allocate an array.
Disable a node layer\n"); 49298f33a2ffSUladzislau Rezki (Sony) } 49308f33a2ffSUladzislau Rezki (Sony) } 49318f33a2ffSUladzislau Rezki (Sony) #endif 49328f33a2ffSUladzislau Rezki (Sony) 49338f33a2ffSUladzislau Rezki (Sony) for (n = 0; n < nr_vmap_nodes; n++) { 49348f33a2ffSUladzislau Rezki (Sony) vn = &vmap_nodes[n]; 4935d0936029SUladzislau Rezki (Sony) vn->busy.root = RB_ROOT; 4936d0936029SUladzislau Rezki (Sony) INIT_LIST_HEAD(&vn->busy.head); 4937d0936029SUladzislau Rezki (Sony) spin_lock_init(&vn->busy.lock); 4938282631cbSUladzislau Rezki (Sony) 4939282631cbSUladzislau Rezki (Sony) vn->lazy.root = RB_ROOT; 4940282631cbSUladzislau Rezki (Sony) INIT_LIST_HEAD(&vn->lazy.head); 4941282631cbSUladzislau Rezki (Sony) spin_lock_init(&vn->lazy.lock); 494272210662SUladzislau Rezki (Sony) 49438f33a2ffSUladzislau Rezki (Sony) for (i = 0; i < MAX_VA_SIZE_PAGES; i++) { 49448f33a2ffSUladzislau Rezki (Sony) INIT_LIST_HEAD(&vn->pool[i].head); 49458f33a2ffSUladzislau Rezki (Sony) WRITE_ONCE(vn->pool[i].len, 0); 494672210662SUladzislau Rezki (Sony) } 494772210662SUladzislau Rezki (Sony) 494872210662SUladzislau Rezki (Sony) spin_lock_init(&vn->pool_lock); 4949d0936029SUladzislau Rezki (Sony) } 4950d0936029SUladzislau Rezki (Sony) } 4951d0936029SUladzislau Rezki (Sony) 49527679ba6bSUladzislau Rezki (Sony) static unsigned long 49537679ba6bSUladzislau Rezki (Sony) vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc) 49547679ba6bSUladzislau Rezki (Sony) { 49557679ba6bSUladzislau Rezki (Sony) unsigned long count; 49567679ba6bSUladzislau Rezki (Sony) struct vmap_node *vn; 49577679ba6bSUladzislau Rezki (Sony) int i, j; 49587679ba6bSUladzislau Rezki (Sony) 49597679ba6bSUladzislau Rezki (Sony) for (count = 0, i = 0; i < nr_vmap_nodes; i++) { 49607679ba6bSUladzislau Rezki (Sony) vn = &vmap_nodes[i]; 49617679ba6bSUladzislau Rezki (Sony) 49627679ba6bSUladzislau Rezki (Sony) for (j = 0; j < MAX_VA_SIZE_PAGES; j++) 49637679ba6bSUladzislau Rezki (Sony) count += READ_ONCE(vn->pool[j].len); 49647679ba6bSUladzislau Rezki (Sony) } 49657679ba6bSUladzislau Rezki (Sony) 49667679ba6bSUladzislau Rezki (Sony) return count ? count : SHRINK_EMPTY; 49677679ba6bSUladzislau Rezki (Sony) } 49687679ba6bSUladzislau Rezki (Sony) 49697679ba6bSUladzislau Rezki (Sony) static unsigned long 49707679ba6bSUladzislau Rezki (Sony) vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) 49717679ba6bSUladzislau Rezki (Sony) { 49727679ba6bSUladzislau Rezki (Sony) int i; 49737679ba6bSUladzislau Rezki (Sony) 49747679ba6bSUladzislau Rezki (Sony) for (i = 0; i < nr_vmap_nodes; i++) 49757679ba6bSUladzislau Rezki (Sony) decay_va_pool_node(&vmap_nodes[i], true); 49767679ba6bSUladzislau Rezki (Sony) 49777679ba6bSUladzislau Rezki (Sony) return SHRINK_STOP; 49787679ba6bSUladzislau Rezki (Sony) } 49797679ba6bSUladzislau Rezki (Sony) 4980208162f4SChristoph Hellwig void __init vmalloc_init(void) 4981208162f4SChristoph Hellwig { 49827679ba6bSUladzislau Rezki (Sony) struct shrinker *vmap_node_shrinker; 4983208162f4SChristoph Hellwig struct vmap_area *va; 4984d0936029SUladzislau Rezki (Sony) struct vmap_node *vn; 4985208162f4SChristoph Hellwig struct vm_struct *tmp; 4986208162f4SChristoph Hellwig int i; 4987208162f4SChristoph Hellwig 4988208162f4SChristoph Hellwig /* 4989208162f4SChristoph Hellwig * Create the cache for vmap_area objects. 
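	 * (KMEM_CACHE() is invoked with SLAB_PANIC, so if this cache cannot
	 * be created the kernel panics at boot rather than running without it.)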
4990208162f4SChristoph Hellwig */ 4991208162f4SChristoph Hellwig vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC); 4992208162f4SChristoph Hellwig 4993208162f4SChristoph Hellwig for_each_possible_cpu(i) { 4994208162f4SChristoph Hellwig struct vmap_block_queue *vbq; 4995208162f4SChristoph Hellwig struct vfree_deferred *p; 4996208162f4SChristoph Hellwig 4997208162f4SChristoph Hellwig vbq = &per_cpu(vmap_block_queue, i); 4998208162f4SChristoph Hellwig spin_lock_init(&vbq->lock); 4999208162f4SChristoph Hellwig INIT_LIST_HEAD(&vbq->free); 5000208162f4SChristoph Hellwig p = &per_cpu(vfree_deferred, i); 5001208162f4SChristoph Hellwig init_llist_head(&p->list); 5002208162f4SChristoph Hellwig INIT_WORK(&p->wq, delayed_vfree_work); 5003062eacf5SUladzislau Rezki (Sony) xa_init(&vbq->vmap_blocks); 5004208162f4SChristoph Hellwig } 5005208162f4SChristoph Hellwig 5006d0936029SUladzislau Rezki (Sony) /* 5007d0936029SUladzislau Rezki (Sony) * Setup nodes before importing vmlist. 5008d0936029SUladzislau Rezki (Sony) */ 5009d0936029SUladzislau Rezki (Sony) vmap_init_nodes(); 5010d0936029SUladzislau Rezki (Sony) 5011208162f4SChristoph Hellwig /* Import existing vmlist entries. */ 5012208162f4SChristoph Hellwig for (tmp = vmlist; tmp; tmp = tmp->next) { 5013208162f4SChristoph Hellwig va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 5014208162f4SChristoph Hellwig if (WARN_ON_ONCE(!va)) 5015208162f4SChristoph Hellwig continue; 5016208162f4SChristoph Hellwig 5017208162f4SChristoph Hellwig va->va_start = (unsigned long)tmp->addr; 5018208162f4SChristoph Hellwig va->va_end = va->va_start + tmp->size; 5019208162f4SChristoph Hellwig va->vm = tmp; 5020d0936029SUladzislau Rezki (Sony) 5021d0936029SUladzislau Rezki (Sony) vn = addr_to_node(va->va_start); 5022d0936029SUladzislau Rezki (Sony) insert_vmap_area(va, &vn->busy.root, &vn->busy.head); 5023208162f4SChristoph Hellwig } 5024208162f4SChristoph Hellwig 5025208162f4SChristoph Hellwig /* 5026208162f4SChristoph Hellwig * Now we can initialize a free vmap space. 5027208162f4SChristoph Hellwig */ 5028208162f4SChristoph Hellwig vmap_init_free_space(); 5029208162f4SChristoph Hellwig vmap_initialized = true; 50307679ba6bSUladzislau Rezki (Sony) 50317679ba6bSUladzislau Rezki (Sony) vmap_node_shrinker = shrinker_alloc(0, "vmap-node"); 50327679ba6bSUladzislau Rezki (Sony) if (!vmap_node_shrinker) { 50337679ba6bSUladzislau Rezki (Sony) pr_err("Failed to allocate vmap-node shrinker!\n"); 50347679ba6bSUladzislau Rezki (Sony) return; 50357679ba6bSUladzislau Rezki (Sony) } 50367679ba6bSUladzislau Rezki (Sony) 50377679ba6bSUladzislau Rezki (Sony) vmap_node_shrinker->count_objects = vmap_node_shrink_count; 50387679ba6bSUladzislau Rezki (Sony) vmap_node_shrinker->scan_objects = vmap_node_shrink_scan; 50397679ba6bSUladzislau Rezki (Sony) shrinker_register(vmap_node_shrinker); 5040208162f4SChristoph Hellwig } 5041