// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/vmacache.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/mmdebug.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/rbtree_augmented.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/printk.h>
#include <linux/userfaultfd_k.h>
#include <linux/moduleparam.h>
#include <linux/pkeys.h>
#include <linux/oom.h>
#include <linux/sched/mm.h>

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmap.h>

#include "internal.h"

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
#endif

static bool ignore_rlimit_data;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);

static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end);

static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
void vma_set_page_prot(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

/*
 * Requires inode->i_mapping->i_mmap_rwsem
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
		struct file *file, struct address_space *mapping)
{
	if (vma->vm_flags & VM_SHARED)
		mapping_unmap_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}
/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	if (file) {
		struct address_space *mapping = file->f_mapping;
		i_mmap_lock_write(mapping);
		__remove_shared_vm_struct(vma, file, mapping);
		i_mmap_unlock_write(mapping);
	}
}

/*
 * Close a vm structure and free it, returning the next.
 */
static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *next = vma->vm_next;

	might_sleep();
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	vm_area_free(vma);
	return next;
}

static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
		struct list_head *uf);
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long newbrk, oldbrk, origbrk;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *next;
	unsigned long min_brk;
	bool populate;
	bool downgraded = false;
	LIST_HEAD(uf);

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	origbrk = mm->brk;

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here.
	 * If this check is done later after the test
	 * of oldbrk with newbrk then it can escape the test and let the data
	 * segment grow beyond its set limit in the case where the limit is
	 * not page aligned -Ram Gupta
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
			      mm->end_data, mm->start_data))
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk) {
		mm->brk = brk;
		goto success;
	}

	/*
	 * Always allow shrinking brk.
	 * __do_munmap() may downgrade mmap_lock to read.
	 */
	if (brk <= mm->brk) {
		int ret;

		/*
		 * mm->brk must be protected by write mmap_lock so update it
		 * before downgrading mmap_lock. When __do_munmap() fails,
		 * mm->brk will be restored from origbrk.
		 */
		mm->brk = brk;
		ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true);
		if (ret < 0) {
			mm->brk = origbrk;
			goto out;
		} else if (ret == 1) {
			downgraded = true;
		}
		goto success;
	}

	/* Check against existing mmap mappings. */
	next = find_vma(mm, oldbrk);
	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)
		goto out;
	mm->brk = brk;

success:
	populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
	if (downgraded)
		mmap_read_unlock(mm);
	else
		mmap_write_unlock(mm);
	userfaultfd_unmap_complete(mm, &uf);
	if (populate)
		mm_populate(oldbrk, newbrk - oldbrk);
	return brk;

out:
	mmap_write_unlock(mm);
	return origbrk;
}
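/*
 * Worked example of the alignment logic above (illustrative only, assuming
 * PAGE_SIZE == 4096 and that the min_brk and rlimit checks pass): with
 * mm->brk == 0x10000, brk(0x10004) computes newbrk = PAGE_ALIGN(0x10004) =
 * 0x11000 and oldbrk = PAGE_ALIGN(0x10000) = 0x10000, so a whole new page
 * is mapped via do_brk_flags(0x10000, 0x1000, ...) even though the requested
 * break moved by only four bytes.  Conversely, brk(0xf000) takes the shrink
 * path and __do_munmap() may downgrade mmap_lock to read mode, which is what
 * the "downgraded" flag tracks.
 */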
static inline unsigned long vma_compute_gap(struct vm_area_struct *vma)
{
	unsigned long gap, prev_end;

	/*
	 * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
	 * allow two stack_guard_gaps between them here, and when choosing
	 * an unmapped area; whereas when expanding we only require one.
	 * That's a little inconsistent, but keeps the code here simpler.
	 */
	gap = vm_start_gap(vma);
	if (vma->vm_prev) {
		prev_end = vm_end_gap(vma->vm_prev);
		if (gap > prev_end)
			gap -= prev_end;
		else
			gap = 0;
	}
	return gap;
}
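/*
 * Illustrative example of the computation above: with a previous vma ending
 * at 0x2000 and this vma starting at 0x5000, and no guard gap in play,
 * vma_compute_gap() returns 0x5000 - 0x2000 = 0x3000.  When either vma is a
 * stack, vm_start_gap()/vm_end_gap() already fold stack_guard_gap into the
 * endpoints, so the value propagated into rb_subtree_gap is the usable gap,
 * not the raw distance between the vmas.
 */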
#ifdef CONFIG_DEBUG_VM_RB
static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
	unsigned long max = vma_compute_gap(vma), subtree_gap;
	if (vma->vm_rb.rb_left) {
		subtree_gap = rb_entry(vma->vm_rb.rb_left,
				struct vm_area_struct, vm_rb)->rb_subtree_gap;
		if (subtree_gap > max)
			max = subtree_gap;
	}
	if (vma->vm_rb.rb_right) {
		subtree_gap = rb_entry(vma->vm_rb.rb_right,
				struct vm_area_struct, vm_rb)->rb_subtree_gap;
		if (subtree_gap > max)
			max = subtree_gap;
	}
	return max;
}

static int browse_rb(struct mm_struct *mm)
{
	struct rb_root *root = &mm->mm_rb;
	int i = 0, j, bug = 0;
	struct rb_node *nd, *pn = NULL;
	unsigned long prev = 0, pend = 0;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		if (vma->vm_start < prev) {
			pr_emerg("vm_start %lx < prev %lx\n",
				  vma->vm_start, prev);
			bug = 1;
		}
		if (vma->vm_start < pend) {
			pr_emerg("vm_start %lx < pend %lx\n",
				  vma->vm_start, pend);
			bug = 1;
		}
		if (vma->vm_start > vma->vm_end) {
			pr_emerg("vm_start %lx > vm_end %lx\n",
				  vma->vm_start, vma->vm_end);
			bug = 1;
		}
		spin_lock(&mm->page_table_lock);
		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
			pr_emerg("free gap %lx, correct %lx\n",
			       vma->rb_subtree_gap,
			       vma_compute_subtree_gap(vma));
			bug = 1;
		}
		spin_unlock(&mm->page_table_lock);
		i++;
		pn = nd;
		prev = vma->vm_start;
		pend = vma->vm_end;
	}
	j = 0;
	for (nd = pn; nd; nd = rb_prev(nd))
		j++;
	if (i != j) {
		pr_emerg("backwards %d, forwards %d\n", j, i);
		bug = 1;
	}
	return bug ? -1 : i;
}
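/*
 * At this point in the rbtree-to-maple-tree conversion the mm keeps both
 * structures in sync, so the debug code below cross-checks every maple tree
 * entry against the vma list/rbtree and dumps both views when they disagree.
 */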
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
extern void mt_validate(struct maple_tree *mt);
extern void mt_dump(const struct maple_tree *mt);

/* Validate the maple tree */
static void validate_mm_mt(struct mm_struct *mm)
{
	struct maple_tree *mt = &mm->mm_mt;
	struct vm_area_struct *vma_mt, *vma = mm->mmap;

	MA_STATE(mas, mt, 0, 0);

	mt_validate(&mm->mm_mt);
	mas_for_each(&mas, vma_mt, ULONG_MAX) {
		if (xa_is_zero(vma_mt))
			continue;

		if (!vma)
			break;

		if ((vma != vma_mt) ||
		    (vma->vm_start != vma_mt->vm_start) ||
		    (vma->vm_end != vma_mt->vm_end) ||
		    (vma->vm_start != mas.index) ||
		    (vma->vm_end - 1 != mas.last)) {
			pr_emerg("issue in %s\n", current->comm);
			dump_stack();
#ifdef CONFIG_DEBUG_VM
			dump_vma(vma_mt);
			pr_emerg("and next in rb\n");
			dump_vma(vma->vm_next);
#endif
			pr_emerg("mt piv: %p %lu - %lu\n", vma_mt,
				 mas.index, mas.last);
			pr_emerg("mt vma: %p %lu - %lu\n", vma_mt,
				 vma_mt->vm_start, vma_mt->vm_end);
			pr_emerg("rb vma: %p %lu - %lu\n", vma,
				 vma->vm_start, vma->vm_end);
			pr_emerg("rb->next = %p %lu - %lu\n", vma->vm_next,
				 vma->vm_next->vm_start, vma->vm_next->vm_end);

			mt_dump(mas.tree);
			if (vma_mt->vm_end != mas.last + 1) {
				pr_err("vma: %p vma_mt %lu-%lu\tmt %lu-%lu\n",
						mm, vma_mt->vm_start, vma_mt->vm_end,
						mas.index, mas.last);
				mt_dump(mas.tree);
			}
			VM_BUG_ON_MM(vma_mt->vm_end != mas.last + 1, mm);
			if (vma_mt->vm_start != mas.index) {
				pr_err("vma: %p vma_mt %p %lu - %lu doesn't match\n",
						mm, vma_mt, vma_mt->vm_start, vma_mt->vm_end);
				mt_dump(mas.tree);
			}
			VM_BUG_ON_MM(vma_mt->vm_start != mas.index, mm);
		}
		VM_BUG_ON(vma != vma_mt);
		vma = vma->vm_next;

	}
	VM_BUG_ON(vma);
}
#else
#define validate_mm_mt(root) do { } while (0)
#endif
static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
{
	struct rb_node *nd;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		VM_BUG_ON_VMA(vma != ignore &&
			vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
			vma);
	}
}

static void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	unsigned long highest_address = 0;
	struct vm_area_struct *vma = mm->mmap;

	while (vma) {
		struct anon_vma *anon_vma = vma->anon_vma;
		struct anon_vma_chain *avc;

		if (anon_vma) {
			anon_vma_lock_read(anon_vma);
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				anon_vma_interval_tree_verify(avc);
			anon_vma_unlock_read(anon_vma);
		}

		highest_address = vm_end_gap(vma);
		vma = vma->vm_next;
		i++;
	}
	if (i != mm->map_count) {
		pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
		bug = 1;
	}
	if (highest_address != mm->highest_vm_end) {
		pr_emerg("mm->highest_vm_end %lx, found %lx\n",
			  mm->highest_vm_end, highest_address);
		bug = 1;
	}
	i = browse_rb(mm);
	if (i != mm->map_count) {
		if (i != -1)
			pr_emerg("map_count %d rb %d\n", mm->map_count, i);
		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}
#else
#define validate_mm_rb(root, ignore) do { } while (0)
#define validate_mm_mt(root) do { } while (0)
#define validate_mm(mm) do { } while (0)
#endif
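/*
 * The callbacks generated below keep vma->rb_subtree_gap equal to the
 * maximum vma_compute_gap() value over the subtree rooted at each node.
 * That is the invariant browse_rb() verifies above and the unmapped-area
 * search relies on.
 */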
RB_DECLARE_CALLBACKS_MAX(static, vma_gap_callbacks,
			 struct vm_area_struct, vm_rb,
			 unsigned long, rb_subtree_gap, vma_compute_gap)

/*
 * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
 * vma->vm_prev->vm_end values changed, without modifying the vma's position
 * in the rbtree.
 */
static void vma_gap_update(struct vm_area_struct *vma)
{
	/*
	 * As it turns out, RB_DECLARE_CALLBACKS_MAX() already created
	 * a callback function that does exactly what we want.
	 */
	vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
}

static inline void vma_rb_insert(struct vm_area_struct *vma,
				 struct rb_root *root)
{
	/* All rb_subtree_gap values must be consistent prior to insertion */
	validate_mm_rb(root, NULL);

	rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}

static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
{
	/*
	 * Note rb_erase_augmented is a fairly large inline function,
	 * so make sure we instantiate it only once with our desired
	 * augmented rbtree callbacks.
	 */
	rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}

static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
						struct rb_root *root,
						struct vm_area_struct *ignore)
{
	/*
	 * All rb_subtree_gap values must be consistent prior to erase,
	 * with the possible exception of
	 *
	 * a. the "next" vma being erased if next->vm_start was reduced in
	 *    __vma_adjust() -> __vma_unlink()
	 * b. the vma being erased in detach_vmas_to_be_unmapped() ->
	 *    vma_rb_erase()
	 */
	validate_mm_rb(root, ignore);

	__vma_rb_erase(vma, root);
}

static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
					 struct rb_root *root)
{
	vma_rb_erase_ignore(vma, root, vma);
}
/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_lock and by
 * the root anon_vma's mutex.
 */
static inline void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

static inline void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}
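/*
 * find_vma_links() - Look up the rbtree insertion point for a new vma.
 * @mm: The mm_struct.
 * @addr: The start address of the range.
 * @end: The end address of the range.
 * @pprev: Set to the vma preceding the range, or NULL.
 * @rb_link: Set to the rbtree link where the new node should be attached.
 * @rb_parent: Set to the future parent rbtree node.
 *
 * Returns: 0 and fills in the output arguments on success, or -ENOMEM if
 * an existing vma already overlaps [@addr, @end).
 */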
static int find_vma_links(struct mm_struct *mm, unsigned long addr,
		unsigned long end, struct vm_area_struct **pprev,
		struct rb_node ***rb_link, struct rb_node **rb_parent)
{
	struct rb_node **__rb_link, *__rb_parent, *rb_prev;

	mmap_assert_locked(mm);
	__rb_link = &mm->mm_rb.rb_node;
	rb_prev = __rb_parent = NULL;

	while (*__rb_link) {
		struct vm_area_struct *vma_tmp;

		__rb_parent = *__rb_link;
		vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);

		if (vma_tmp->vm_end > addr) {
			/* Fail if an existing vma overlaps the area */
			if (vma_tmp->vm_start < end)
				return -ENOMEM;
			__rb_link = &__rb_parent->rb_left;
		} else {
			rb_prev = __rb_parent;
			__rb_link = &__rb_parent->rb_right;
		}
	}

	*pprev = NULL;
	if (rb_prev)
		*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
	*rb_link = __rb_link;
	*rb_parent = __rb_parent;
	return 0;
}

/*
 * __vma_next() - Get the next VMA.
 * @mm: The mm_struct.
 * @vma: The current vma.
 *
 * If @vma is NULL, return the first vma in the mm.
 *
 * Returns: The next VMA after @vma.
 */
static inline struct vm_area_struct *__vma_next(struct mm_struct *mm,
					 struct vm_area_struct *vma)
{
	if (!vma)
		return mm->mmap;

	return vma->vm_next;
}

/*
 * munmap_vma_range() - munmap VMAs that overlap a range.
 * @mm: The mm struct
 * @start: The start of the range.
 * @len: The length of the range.
 * @pprev: pointer to the pointer that will be set to previous vm_area_struct
 * @rb_link: the rb_node
 * @rb_parent: the parent rb_node
 *
 * Find all the vm_area_struct that overlap from @start to
 * @end and munmap them.  Set @pprev to the previous vm_area_struct.
 *
 * Returns: -ENOMEM on munmap failure or 0 on success.
 */
static inline int
munmap_vma_range(struct mm_struct *mm, unsigned long start, unsigned long len,
		 struct vm_area_struct **pprev, struct rb_node ***link,
		 struct rb_node **parent, struct list_head *uf)
{

	while (find_vma_links(mm, start, start + len, pprev, link, parent))
		if (do_munmap(mm, start, len, uf))
			return -ENOMEM;

	return 0;
}
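/*
 * count_vma_pages_range() - Count the pages of all vmas intersecting
 * [@addr, @end), clamping each vma to the range so that partial overlaps
 * contribute only their overlapping pages.
 */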
static unsigned long count_vma_pages_range(struct mm_struct *mm,
		unsigned long addr, unsigned long end)
{
	VMA_ITERATOR(vmi, mm, addr);
	struct vm_area_struct *vma;
	unsigned long nr_pages = 0;

	for_each_vma_range(vmi, vma, end) {
		unsigned long vm_start = max(addr, vma->vm_start);
		unsigned long vm_end = min(end, vma->vm_end);

		nr_pages += PHYS_PFN(vm_end - vm_start);
	}

	return nr_pages;
}

void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
		struct rb_node **rb_link, struct rb_node *rb_parent)
{
	/* Update tracking information for the gap following the new vma. */
	if (vma->vm_next)
		vma_gap_update(vma->vm_next);
	else
		mm->highest_vm_end = vm_end_gap(vma);

	/*
	 * vma->vm_prev wasn't known when we followed the rbtree to find the
	 * correct insertion point for that vma. As a result, we could not
	 * update the vma vm_rb parents rb_subtree_gap values on the way down.
	 * So, we first insert the vma with a zero rb_subtree_gap value
	 * (to be consistent with what we did on the way down), and then
	 * immediately update the gap to the correct value. Finally we
	 * rebalance the rbtree after all augmented values have been set.
	 */
	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
	vma->rb_subtree_gap = 0;
	vma_gap_update(vma);
	vma_rb_insert(vma, &mm->mm_rb);
}

static void __vma_link_file(struct vm_area_struct *vma)
{
	struct file *file;

	file = vma->vm_file;
	if (file) {
		struct address_space *mapping = file->f_mapping;

		if (vma->vm_flags & VM_SHARED)
			mapping_allow_writable(mapping);

		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}
}
/*
 * vma_mas_store() - Store a VMA in the maple tree.
 * @vma: The vm_area_struct
 * @mas: The maple state
 *
 * Efficient way to store a VMA in the maple tree when the @mas has already
 * walked to the correct location.
 *
 * Note: the end address is inclusive in the maple tree.
 */
void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas)
{
	trace_vma_store(mas->tree, vma);
	mas_set_range(mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(mas, vma);
}

/*
 * vma_mas_remove() - Remove a VMA from the maple tree.
 * @vma: The vm_area_struct
 * @mas: The maple state
 *
 * Efficient way to remove a VMA from the maple tree when the @mas has already
 * been established and points to the correct location.
 * Note: the end address is inclusive in the maple tree.
 */
void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
{
	trace_vma_mas_szero(mas->tree, vma->vm_start, vma->vm_end - 1);
	mas->index = vma->vm_start;
	mas->last = vma->vm_end - 1;
	mas_store_prealloc(mas, NULL);
}

/*
 * vma_mas_szero() - Set a given range to zero.  Used when modifying a
 * vm_area_struct start or end.
 *
 * @mas: The maple state
 * @start: The start address to zero
 * @end: The end address to zero.
 */
static inline void vma_mas_szero(struct ma_state *mas, unsigned long start,
				unsigned long end)
{
	trace_vma_mas_szero(mas->tree, start, end - 1);
	mas_set_range(mas, start, end - 1);
	mas_store_prealloc(mas, NULL);
}
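/*
 * Typical usage of the helpers above (cf. vma_link() below): callers first
 * reserve nodes with mas_preallocate() while it is still safe to fail, and
 * only then call vma_mas_store()/vma_mas_szero(), whose mas_store_prealloc()
 * step cannot fail:
 *
 *	MA_STATE(mas, &mm->mm_mt, 0, 0);
 *	if (mas_preallocate(&mas, vma, GFP_KERNEL))
 *		return -ENOMEM;
 *	vma_mas_store(vma, &mas);
 */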
static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
	struct vm_area_struct *prev, struct rb_node **rb_link,
	struct rb_node *rb_parent)
{
	__vma_link_list(mm, vma, prev);
	__vma_link_rb(mm, vma, rb_link, rb_parent);
}

static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
			struct vm_area_struct *prev, struct rb_node **rb_link,
			struct rb_node *rb_parent)
{
	MA_STATE(mas, &mm->mm_mt, 0, 0);
	struct address_space *mapping = NULL;

	if (mas_preallocate(&mas, vma, GFP_KERNEL))
		return -ENOMEM;

	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;
		i_mmap_lock_write(mapping);
	}

	vma_mas_store(vma, &mas);
	__vma_link(mm, vma, prev, rb_link, rb_parent);
	__vma_link_file(vma);

	if (mapping)
		i_mmap_unlock_write(mapping);

	mm->map_count++;
	validate_mm(mm);
	return 0;
}

/*
 * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
 * mm's list and rbtree.  It has already been inserted into the interval tree.
 */
static void __insert_vm_struct(struct mm_struct *mm, struct ma_state *mas,
			       struct vm_area_struct *vma)
{
	struct vm_area_struct *prev;
	struct rb_node **rb_link, *rb_parent;

	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
			   &prev, &rb_link, &rb_parent))
		BUG();

	vma_mas_store(vma, mas);
	__vma_link_list(mm, vma, prev);
	__vma_link_rb(mm, vma, rb_link, rb_parent);
	mm->map_count++;
}

static __always_inline void __vma_unlink(struct mm_struct *mm,
						struct vm_area_struct *vma,
						struct vm_area_struct *ignore)
{
	vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
	__vma_unlink_list(mm, vma);
	/* Kill the cache */
	vmacache_invalidate(mm);
}

/*
 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
 * is already present in an i_mmap tree without adjusting the tree.
 * The following helper function should be used when such adjustments
 * are necessary. The "insert" vma (if any) is to be inserted
 * before we drop the necessary locks.
 */
int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
	struct vm_area_struct *expand)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
	struct vm_area_struct *next_next;
	struct address_space *mapping = NULL;
	struct rb_root_cached *root = NULL;
	struct anon_vma *anon_vma = NULL;
	struct file *file = vma->vm_file;
	bool start_changed = false, end_changed = false;
	long adjust_next = 0;
	int remove_next = 0;
	MA_STATE(mas, &mm->mm_mt, 0, 0);
	struct vm_area_struct *exporter = NULL, *importer = NULL;

	validate_mm(mm);
	validate_mm_mt(mm);

	if (next && !insert) {
		if (end >= next->vm_end) {
			/*
			 * vma expands, overlapping all the next, and
			 * perhaps the one after too (mprotect case 6).
			 * The only other cases that get here are
			 * case 1, case 7 and case 8.
			 */
			if (next == expand) {
				/*
				 * The only case where we don't expand "vma"
				 * and we expand "next" instead is case 8.
				 */
				VM_WARN_ON(end != next->vm_end);
				/*
				 * remove_next == 3 means we're
				 * removing "vma" and that to do so we
				 * swapped "vma" and "next".
				 */
				remove_next = 3;
				VM_WARN_ON(file != next->vm_file);
				swap(vma, next);
			} else {
				VM_WARN_ON(expand != vma);
				/*
				 * case 1, 6, 7, remove_next == 2 is case 6,
				 * remove_next == 1 is case 1 or 7.
				 */
				remove_next = 1 + (end > next->vm_end);
				if (remove_next == 2)
					next_next = find_vma(mm, next->vm_end);

				VM_WARN_ON(remove_next == 2 &&
					   end != next->vm_next->vm_end);
			}

			exporter = next;
			importer = vma;

			/*
			 * If next doesn't have anon_vma, import from vma after
			 * next, if the vma overlaps with it.
			 */
			if (remove_next == 2 && !next->anon_vma)
				exporter = next->vm_next;

		} else if (end > next->vm_start) {
			/*
			 * vma expands, overlapping part of the next:
			 * mprotect case 5 shifting the boundary up.
			 */
			adjust_next = (end - next->vm_start);
			exporter = next;
			importer = vma;
			VM_WARN_ON(expand != importer);
		} else if (end < vma->vm_end) {
			/*
			 * vma shrinks, and !insert tells it's not
			 * split_vma inserting another: so it must be
			 * mprotect case 4 shifting the boundary down.
			 */
			adjust_next = -(vma->vm_end - end);
			exporter = vma;
			importer = next;
			VM_WARN_ON(expand != importer);
		}

		/*
		 * Easily overlooked: when mprotect shifts the boundary,
		 * make sure the expanding vma has anon_vma set if the
		 * shrinking vma had, to cover any anon pages imported.
		 */
		if (exporter && exporter->anon_vma && !importer->anon_vma) {
			int error;

			importer->anon_vma = exporter->anon_vma;
			error = anon_vma_clone(importer, exporter);
			if (error)
				return error;
		}
	}

	if (mas_preallocate(&mas, vma, GFP_KERNEL))
		return -ENOMEM;

	vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
	if (file) {
		mapping = file->f_mapping;
		root = &mapping->i_mmap;
		uprobe_munmap(vma, vma->vm_start, vma->vm_end);

		if (adjust_next)
			uprobe_munmap(next, next->vm_start, next->vm_end);

		i_mmap_lock_write(mapping);
		if (insert) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(insert);
		}
	}

	anon_vma = vma->anon_vma;
	if (!anon_vma && adjust_next)
		anon_vma = next->anon_vma;
	if (anon_vma) {
		VM_WARN_ON(adjust_next && next->anon_vma &&
			   anon_vma != next->anon_vma);
		anon_vma_lock_write(anon_vma);
		anon_vma_interval_tree_pre_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_pre_update_vma(next);
	}

	if (file) {
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, root);
		if (adjust_next)
			vma_interval_tree_remove(next, root);
	}

	if (start != vma->vm_start) {
		unsigned long old_start = vma->vm_start;
		vma->vm_start = start;
		if (old_start < start)
			vma_mas_szero(&mas, old_start, start);
		start_changed = true;
	}
	if (end != vma->vm_end) {
		unsigned long old_end = vma->vm_end;
		vma->vm_end = end;
		if (old_end > end)
			vma_mas_szero(&mas, end, old_end);
		end_changed = true;
	}

	if (end_changed || start_changed)
		vma_mas_store(vma, &mas);

	vma->vm_pgoff = pgoff;
	if (adjust_next) {
		next->vm_start += adjust_next;
		next->vm_pgoff += adjust_next >> PAGE_SHIFT;
		vma_mas_store(next, &mas);
	}

	if (file) {
		if (adjust_next)
			vma_interval_tree_insert(next, root);
		vma_interval_tree_insert(vma, root);
		flush_dcache_mmap_unlock(mapping);
	}

	if (remove_next) {
		/*
		 * vma_merge has merged next into vma, and needs
		 * us to remove next before dropping the locks.
		 * Since we have expanded over this vma, the maple tree
		 * entries for next have already been overwritten by the
		 * store above.
		 */
		if (remove_next != 3) {
			__vma_unlink(mm, next, next);
			if (remove_next == 2)
				__vma_unlink(mm, next_next, next_next);
		} else {
			/*
			 * vma is not before next if they've been
			 * swapped.
			 *
			 * pre-swap() next->vm_start was reduced so
			 * tell validate_mm_rb to ignore pre-swap()
			 * "next" (which is stored in post-swap()
			 * "vma").
			 */
			__vma_unlink(mm, next, vma);
		}
		if (file) {
			__remove_shared_vm_struct(next, file, mapping);
			if (remove_next == 2)
				__remove_shared_vm_struct(next_next, file, mapping);
		}
	} else if (insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		__insert_vm_struct(mm, &mas, insert);
	} else {
		if (start_changed)
			vma_gap_update(vma);
		if (end_changed) {
			if (!next)
				mm->highest_vm_end = vm_end_gap(vma);
			else if (!adjust_next)
				vma_gap_update(next);
		}
	}

	if (anon_vma) {
		anon_vma_interval_tree_post_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_post_update_vma(next);
		anon_vma_unlock_write(anon_vma);
	}

	if (file) {
		i_mmap_unlock_write(mapping);
		uprobe_mmap(vma);

		if (adjust_next)
			uprobe_mmap(next);
	}

	if (remove_next) {
again:
		if (file) {
			uprobe_munmap(next, next->vm_start, next->vm_end);
			fput(file);
		}
		if (next->anon_vma)
			anon_vma_merge(vma, next);
		mm->map_count--;
		mpol_put(vma_policy(next));
		vm_area_free(next);
		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we must remove another next too. It would clutter
		 * up the code too much to do both in one go.
		 */
		if (remove_next != 3) {
			/*
			 * If "next" was removed and vma->vm_end was
			 * expanded (up) over it, in turn
			 * "next->vm_prev->vm_end" changed and the
			 * "vma->vm_next" gap must be updated.
			 */
			next = next_next;
		} else {
			/*
			 * For the scope of the comment "next" and
			 * "vma" considered pre-swap(): if "vma" was
			 * removed, next->vm_start was expanded (down)
			 * over it and the "next" gap must be updated.
			 * Because of the swap() the post-swap() "vma"
			 * actually points to pre-swap() "next"
			 * (post-swap() "next" as opposed is now a
			 * dangling pointer).
			 */
			next = vma;
		}
		if (remove_next == 2) {
			remove_next = 1;
			goto again;
		} else if (next)
			vma_gap_update(next);
		else {
			/*
			 * If remove_next == 2 we obviously can't
			 * reach this path.
			 *
			 * If remove_next == 3 we can't reach this
			 * path because pre-swap() next is always not
			 * NULL. pre-swap() "next" is not being
			 * removed and its next->vm_end is not altered
			 * (and furthermore "end" already matches
			 * next->vm_end in remove_next == 3).
			 *
			 * We reach this only in the remove_next == 1
			 * case if the "next" vma that was removed was
			 * the highest vma of the mm. However in such
			 * case next->vm_end == "end" and the extended
			 * "vma" has vma->vm_end == next->vm_end so
			 * mm->highest_vm_end doesn't need any update
			 * in remove_next == 1 case.
			 */
			VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
		}
	}
	if (insert && file)
		uprobe_mmap(insert);

	validate_mm(mm);
	validate_mm_mt(mm);

	return 0;
}

/*
 * If the vma has a ->close operation then the driver probably needs to release
 * per-vma resources, so we don't attempt to merge those.
11301da177e4SLinus Torvalds */
11311da177e4SLinus Torvalds static inline int is_mergeable_vma(struct vm_area_struct *vma,
113219a809afSAndrea Arcangeli struct file *file, unsigned long vm_flags,
11339a10064fSColin Cross struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
11345c26f6acSSuren Baghdasaryan struct anon_vma_name *anon_name)
11351da177e4SLinus Torvalds {
113634228d47SCyrill Gorcunov /*
113734228d47SCyrill Gorcunov * VM_SOFTDIRTY should not prevent VMA merging if we
113834228d47SCyrill Gorcunov * match on all flags but the dirty bit -- the caller should mark
113934228d47SCyrill Gorcunov * the merged VMA as dirty. If the dirty bit were not excluded
11408bb4e7a2SWei Yang * from the comparison, we would increase pressure on the memory
114134228d47SCyrill Gorcunov * system, forcing the kernel to generate new VMAs when an old
114234228d47SCyrill Gorcunov * one could be extended instead.
114334228d47SCyrill Gorcunov */
114434228d47SCyrill Gorcunov if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
11451da177e4SLinus Torvalds return 0;
11461da177e4SLinus Torvalds if (vma->vm_file != file)
11471da177e4SLinus Torvalds return 0;
11481da177e4SLinus Torvalds if (vma->vm_ops && vma->vm_ops->close)
11491da177e4SLinus Torvalds return 0;
115019a809afSAndrea Arcangeli if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
115119a809afSAndrea Arcangeli return 0;
11525c26f6acSSuren Baghdasaryan if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
11539a10064fSColin Cross return 0;
11541da177e4SLinus Torvalds return 1;
11551da177e4SLinus Torvalds }
11561da177e4SLinus Torvalds
11571da177e4SLinus Torvalds static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
1158965f55deSShaohua Li struct anon_vma *anon_vma2,
1159965f55deSShaohua Li struct vm_area_struct *vma)
11601da177e4SLinus Torvalds {
1161965f55deSShaohua Li /*
1162965f55deSShaohua Li * The list_is_singular() test is to avoid merging VMAs cloned from a
1163965f55deSShaohua Li * parent, which improves scalability by reducing anon_vma lock contention.
1164965f55deSShaohua Li */
1165965f55deSShaohua Li if ((!anon_vma1 || !anon_vma2) && (!vma ||
1166965f55deSShaohua Li list_is_singular(&vma->anon_vma_chain)))
1167965f55deSShaohua Li return 1;
1168965f55deSShaohua Li return anon_vma1 == anon_vma2;
11691da177e4SLinus Torvalds }
11701da177e4SLinus Torvalds
11711da177e4SLinus Torvalds /*
11721da177e4SLinus Torvalds * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
11731da177e4SLinus Torvalds * in front of (at a lower virtual address and file offset than) the vma.
11741da177e4SLinus Torvalds *
11751da177e4SLinus Torvalds * We cannot merge two vmas if they have differently assigned (non-NULL)
11761da177e4SLinus Torvalds * anon_vmas, nor if the same anon_vma is assigned but the offsets are incompatible.
11771da177e4SLinus Torvalds *
11781da177e4SLinus Torvalds * We don't check here for the merged mmap wrapping around the end of pagecache
117945e55300SPeter Collingbourne * indices (16TB on ia32) because do_mmap() does not permit mmaps which
11801da177e4SLinus Torvalds * wrap, nor mmaps which cover the final page at index -1UL.
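 *
 * Worked example (exposition only, not from the original comment): the
 * callers below pass vm_pgoff already advanced to the file offset the
 * new range would have at vma->vm_start. So a one-page request at file
 * offset 3 that ends exactly at vma->vm_start merges in front of "vma"
 * only when vma->vm_pgoff == 4, i.e. when the file offsets continue
 * across the boundary without a hole.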
11811da177e4SLinus Torvalds */ 11821da177e4SLinus Torvalds static int 11831da177e4SLinus Torvalds can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, 118419a809afSAndrea Arcangeli struct anon_vma *anon_vma, struct file *file, 118519a809afSAndrea Arcangeli pgoff_t vm_pgoff, 11869a10064fSColin Cross struct vm_userfaultfd_ctx vm_userfaultfd_ctx, 11875c26f6acSSuren Baghdasaryan struct anon_vma_name *anon_name) 11881da177e4SLinus Torvalds { 11899a10064fSColin Cross if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) && 1190965f55deSShaohua Li is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { 11911da177e4SLinus Torvalds if (vma->vm_pgoff == vm_pgoff) 11921da177e4SLinus Torvalds return 1; 11931da177e4SLinus Torvalds } 11941da177e4SLinus Torvalds return 0; 11951da177e4SLinus Torvalds } 11961da177e4SLinus Torvalds 11971da177e4SLinus Torvalds /* 11981da177e4SLinus Torvalds * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) 11991da177e4SLinus Torvalds * beyond (at a higher virtual address and file offset than) the vma. 12001da177e4SLinus Torvalds * 12011da177e4SLinus Torvalds * We cannot merge two vmas if they have differently assigned (non-NULL) 12021da177e4SLinus Torvalds * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. 12031da177e4SLinus Torvalds */ 12041da177e4SLinus Torvalds static int 12051da177e4SLinus Torvalds can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, 120619a809afSAndrea Arcangeli struct anon_vma *anon_vma, struct file *file, 120719a809afSAndrea Arcangeli pgoff_t vm_pgoff, 12089a10064fSColin Cross struct vm_userfaultfd_ctx vm_userfaultfd_ctx, 12095c26f6acSSuren Baghdasaryan struct anon_vma_name *anon_name) 12101da177e4SLinus Torvalds { 12119a10064fSColin Cross if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) && 1212965f55deSShaohua Li is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { 12131da177e4SLinus Torvalds pgoff_t vm_pglen; 1214d6e93217SLibin vm_pglen = vma_pages(vma); 12151da177e4SLinus Torvalds if (vma->vm_pgoff + vm_pglen == vm_pgoff) 12161da177e4SLinus Torvalds return 1; 12171da177e4SLinus Torvalds } 12181da177e4SLinus Torvalds return 0; 12191da177e4SLinus Torvalds } 12201da177e4SLinus Torvalds 12211da177e4SLinus Torvalds /* 12229a10064fSColin Cross * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name), 12239a10064fSColin Cross * figure out whether that can be merged with its predecessor or its 12249a10064fSColin Cross * successor. Or both (it neatly fills a hole). 12251da177e4SLinus Torvalds * 12261da177e4SLinus Torvalds * In most cases - when called for mmap, brk or mremap - [addr,end) is 12271da177e4SLinus Torvalds * certain not to be mapped by the time vma_merge is called; but when 12281da177e4SLinus Torvalds * called for mprotect, it is certain to be already mapped (either at 12291da177e4SLinus Torvalds * an offset within prev, or at the start of next), and the flags of 12301da177e4SLinus Torvalds * this area are about to be changed to vm_flags - and the no-change 12311da177e4SLinus Torvalds * case has already been eliminated. 
12321da177e4SLinus Torvalds *
12331da177e4SLinus Torvalds * The following mprotect cases have to be considered, where AAAA is
12341da177e4SLinus Torvalds * the area passed down from mprotect_fixup, never extending beyond one
12351da177e4SLinus Torvalds * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
12361da177e4SLinus Torvalds *
12375d42ab29SWei Yang * AAAA AAAA AAAA
12385d42ab29SWei Yang * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPNNNNNN
12395d42ab29SWei Yang * cannot merge might become might become
12405d42ab29SWei Yang * PPNNNNNNNNNN PPPPPPPPPPNN
12415d42ab29SWei Yang * mmap, brk or case 4 below case 5 below
12425d42ab29SWei Yang * mremap move:
12435d42ab29SWei Yang * AAAA AAAA
12445d42ab29SWei Yang * PPPP NNNN PPPPNNNNXXXX
12455d42ab29SWei Yang * might become might become
12465d42ab29SWei Yang * PPPPPPPPPPPP 1 or PPPPPPPPPPPP 6 or
12475d42ab29SWei Yang * PPPPPPPPNNNN 2 or PPPPPPPPXXXX 7 or
12485d42ab29SWei Yang * PPPPNNNNNNNN 3 PPPPXXXXXXXX 8
12491da177e4SLinus Torvalds *
12508bb4e7a2SWei Yang * It is important for case 8 that the vma NNNN overlapping the
1251e86f15eeSAndrea Arcangeli * region AAAA is never going to be extended over XXXX. Instead, XXXX must
1252e86f15eeSAndrea Arcangeli * be extended in region AAAA and NNNN must be removed. This way, in
1253e86f15eeSAndrea Arcangeli * all cases where vma_merge succeeds, the moment vma_adjust drops the
1254e86f15eeSAndrea Arcangeli * rmap_locks, the properties of the merged vma will already be
1255e86f15eeSAndrea Arcangeli * correct for the whole merged range. Some of those properties, like
1256e86f15eeSAndrea Arcangeli * vm_page_prot/vm_flags, may be accessed by rmap_walks and they must
1257e86f15eeSAndrea Arcangeli * be correct for the whole merged range immediately after the
1258e86f15eeSAndrea Arcangeli * rmap_locks are released. Otherwise, if XXXX were removed and
1259e86f15eeSAndrea Arcangeli * NNNN extended over the XXXX range, remove_migration_ptes
1260e86f15eeSAndrea Arcangeli * or other rmap walkers (if working on addresses beyond the "end"
1261e86f15eeSAndrea Arcangeli * parameter) may establish ptes with the wrong permissions of NNNN
1262e86f15eeSAndrea Arcangeli * instead of the right permissions of XXXX.
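 *
 * A userspace view of this merging, as a hedged illustration (not part
 * of the original comment; "page" stands for the system page size):
 *
 *	p = mmap(NULL, 2 * page, PROT_READ|PROT_WRITE,
 *		 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
 *	mprotect(p, page, PROT_READ);             two vmas, like PPPP NNNN
 *	mprotect(p, page, PROT_READ|PROT_WRITE);  vmas merge back into one,
 *	                                          one line in /proc/self/maps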
12801da177e4SLinus Torvalds */ 12811da177e4SLinus Torvalds if (vm_flags & VM_SPECIAL) 12821da177e4SLinus Torvalds return NULL; 12831da177e4SLinus Torvalds 1284f39af059SMatthew Wilcox (Oracle) next = __vma_next(mm, prev); 12851da177e4SLinus Torvalds area = next; 1286e86f15eeSAndrea Arcangeli if (area && area->vm_end == end) /* cases 6, 7, 8 */ 12871da177e4SLinus Torvalds next = next->vm_next; 12881da177e4SLinus Torvalds 1289e86f15eeSAndrea Arcangeli /* verify some invariant that must be enforced by the caller */ 1290e86f15eeSAndrea Arcangeli VM_WARN_ON(prev && addr <= prev->vm_start); 1291e86f15eeSAndrea Arcangeli VM_WARN_ON(area && end > area->vm_end); 1292e86f15eeSAndrea Arcangeli VM_WARN_ON(addr >= end); 1293e86f15eeSAndrea Arcangeli 12941da177e4SLinus Torvalds /* 12951da177e4SLinus Torvalds * Can it merge with the predecessor? 12961da177e4SLinus Torvalds */ 12971da177e4SLinus Torvalds if (prev && prev->vm_end == addr && 12981da177e4SLinus Torvalds mpol_equal(vma_policy(prev), policy) && 12991da177e4SLinus Torvalds can_vma_merge_after(prev, vm_flags, 130019a809afSAndrea Arcangeli anon_vma, file, pgoff, 13019a10064fSColin Cross vm_userfaultfd_ctx, anon_name)) { 13021da177e4SLinus Torvalds /* 13031da177e4SLinus Torvalds * OK, it can. Can we now merge in the successor as well? 13041da177e4SLinus Torvalds */ 13051da177e4SLinus Torvalds if (next && end == next->vm_start && 13061da177e4SLinus Torvalds mpol_equal(policy, vma_policy(next)) && 13071da177e4SLinus Torvalds can_vma_merge_before(next, vm_flags, 130819a809afSAndrea Arcangeli anon_vma, file, 130919a809afSAndrea Arcangeli pgoff+pglen, 13109a10064fSColin Cross vm_userfaultfd_ctx, anon_name) && 13111da177e4SLinus Torvalds is_mergeable_anon_vma(prev->anon_vma, 1312965f55deSShaohua Li next->anon_vma, NULL)) { 13131da177e4SLinus Torvalds /* cases 1, 6 */ 1314e86f15eeSAndrea Arcangeli err = __vma_adjust(prev, prev->vm_start, 1315e86f15eeSAndrea Arcangeli next->vm_end, prev->vm_pgoff, NULL, 1316e86f15eeSAndrea Arcangeli prev); 13171da177e4SLinus Torvalds } else /* cases 2, 5, 7 */ 1318e86f15eeSAndrea Arcangeli err = __vma_adjust(prev, prev->vm_start, 1319e86f15eeSAndrea Arcangeli end, prev->vm_pgoff, NULL, prev); 13205beb4930SRik van Riel if (err) 13215beb4930SRik van Riel return NULL; 1322c791576cSYang Shi khugepaged_enter_vma(prev, vm_flags); 13231da177e4SLinus Torvalds return prev; 13241da177e4SLinus Torvalds } 13251da177e4SLinus Torvalds 13261da177e4SLinus Torvalds /* 13271da177e4SLinus Torvalds * Can this new request be merged in front of next? 13281da177e4SLinus Torvalds */ 13291da177e4SLinus Torvalds if (next && end == next->vm_start && 13301da177e4SLinus Torvalds mpol_equal(policy, vma_policy(next)) && 13311da177e4SLinus Torvalds can_vma_merge_before(next, vm_flags, 133219a809afSAndrea Arcangeli anon_vma, file, pgoff+pglen, 13339a10064fSColin Cross vm_userfaultfd_ctx, anon_name)) { 13341da177e4SLinus Torvalds if (prev && addr < prev->vm_end) /* case 4 */ 1335e86f15eeSAndrea Arcangeli err = __vma_adjust(prev, prev->vm_start, 1336e86f15eeSAndrea Arcangeli addr, prev->vm_pgoff, NULL, next); 1337e86f15eeSAndrea Arcangeli else { /* cases 3, 8 */ 1338e86f15eeSAndrea Arcangeli err = __vma_adjust(area, addr, next->vm_end, 1339e86f15eeSAndrea Arcangeli next->vm_pgoff - pglen, NULL, next); 1340e86f15eeSAndrea Arcangeli /* 1341e86f15eeSAndrea Arcangeli * In case 3 area is already equal to next and 1342e86f15eeSAndrea Arcangeli * this is a noop, but in case 8 "area" has 1343e86f15eeSAndrea Arcangeli * been removed and next was expanded over it. 
1344e86f15eeSAndrea Arcangeli */ 1345e86f15eeSAndrea Arcangeli area = next; 1346e86f15eeSAndrea Arcangeli } 13475beb4930SRik van Riel if (err) 13485beb4930SRik van Riel return NULL; 1349c791576cSYang Shi khugepaged_enter_vma(area, vm_flags); 13501da177e4SLinus Torvalds return area; 13511da177e4SLinus Torvalds } 1352d4af56c5SLiam R. Howlett validate_mm_mt(mm); 13531da177e4SLinus Torvalds 13541da177e4SLinus Torvalds return NULL; 13551da177e4SLinus Torvalds } 13561da177e4SLinus Torvalds 13571da177e4SLinus Torvalds /* 1358b4f315b4SEthon Paul * Rough compatibility check to quickly see if it's even worth looking 1359d0e9fe17SLinus Torvalds * at sharing an anon_vma. 1360d0e9fe17SLinus Torvalds * 1361d0e9fe17SLinus Torvalds * They need to have the same vm_file, and the flags can only differ 1362d0e9fe17SLinus Torvalds * in things that mprotect may change. 1363d0e9fe17SLinus Torvalds * 1364d0e9fe17SLinus Torvalds * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that 1365d0e9fe17SLinus Torvalds * we can merge the two vma's. For example, we refuse to merge a vma if 1366d0e9fe17SLinus Torvalds * there is a vm_ops->close() function, because that indicates that the 1367d0e9fe17SLinus Torvalds * driver is doing some kind of reference counting. But that doesn't 1368d0e9fe17SLinus Torvalds * really matter for the anon_vma sharing case. 1369d0e9fe17SLinus Torvalds */ 1370d0e9fe17SLinus Torvalds static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b) 1371d0e9fe17SLinus Torvalds { 1372d0e9fe17SLinus Torvalds return a->vm_end == b->vm_start && 1373d0e9fe17SLinus Torvalds mpol_equal(vma_policy(a), vma_policy(b)) && 1374d0e9fe17SLinus Torvalds a->vm_file == b->vm_file && 13756cb4d9a2SAnshuman Khandual !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) && 1376d0e9fe17SLinus Torvalds b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); 1377d0e9fe17SLinus Torvalds } 1378d0e9fe17SLinus Torvalds 1379d0e9fe17SLinus Torvalds /* 1380d0e9fe17SLinus Torvalds * Do some basic sanity checking to see if we can re-use the anon_vma 1381d0e9fe17SLinus Torvalds * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be 1382d0e9fe17SLinus Torvalds * the same as 'old', the other will be the new one that is trying 1383d0e9fe17SLinus Torvalds * to share the anon_vma. 1384d0e9fe17SLinus Torvalds * 13855b449489SFlorian Rommel * NOTE! This runs with mmap_lock held for reading, so it is possible that 1386d0e9fe17SLinus Torvalds * the anon_vma of 'old' is concurrently in the process of being set up 1387d0e9fe17SLinus Torvalds * by another page fault trying to merge _that_. But that's ok: if it 1388d0e9fe17SLinus Torvalds * is being set up, that automatically means that it will be a singleton 1389d0e9fe17SLinus Torvalds * acceptable for merging, so we can do all of this optimistically. But 13904db0c3c2SJason Low * we do that READ_ONCE() to make sure that we never re-load the pointer. 1391d0e9fe17SLinus Torvalds * 1392d0e9fe17SLinus Torvalds * IOW: that the "list_is_singular()" test on the anon_vma_chain only 1393d0e9fe17SLinus Torvalds * matters for the 'stable anon_vma' case (ie the thing we want to avoid 1394d0e9fe17SLinus Torvalds * is to return an anon_vma that is "complex" due to having gone through 1395d0e9fe17SLinus Torvalds * a fork). 1396d0e9fe17SLinus Torvalds * 1397d0e9fe17SLinus Torvalds * We also make sure that the two vma's are compatible (adjacent, 1398d0e9fe17SLinus Torvalds * and with the same memory policies). 
That's all stable, even with just 13995b449489SFlorian Rommel * a read lock on the mmap_lock. 1400d0e9fe17SLinus Torvalds */ 1401d0e9fe17SLinus Torvalds static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b) 1402d0e9fe17SLinus Torvalds { 1403d0e9fe17SLinus Torvalds if (anon_vma_compatible(a, b)) { 14044db0c3c2SJason Low struct anon_vma *anon_vma = READ_ONCE(old->anon_vma); 1405d0e9fe17SLinus Torvalds 1406d0e9fe17SLinus Torvalds if (anon_vma && list_is_singular(&old->anon_vma_chain)) 1407d0e9fe17SLinus Torvalds return anon_vma; 1408d0e9fe17SLinus Torvalds } 1409d0e9fe17SLinus Torvalds return NULL; 1410d0e9fe17SLinus Torvalds } 1411d0e9fe17SLinus Torvalds 1412d0e9fe17SLinus Torvalds /* 14131da177e4SLinus Torvalds * find_mergeable_anon_vma is used by anon_vma_prepare, to check 14141da177e4SLinus Torvalds * neighbouring vmas for a suitable anon_vma, before it goes off 14151da177e4SLinus Torvalds * to allocate a new anon_vma. It checks because a repetitive 14161da177e4SLinus Torvalds * sequence of mprotects and faults may otherwise lead to distinct 14171da177e4SLinus Torvalds * anon_vmas being allocated, preventing vma merge in subsequent 14181da177e4SLinus Torvalds * mprotect. 14191da177e4SLinus Torvalds */ 14201da177e4SLinus Torvalds struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) 14211da177e4SLinus Torvalds { 1422a67c8caaSMiaohe Lin struct anon_vma *anon_vma = NULL; 14231da177e4SLinus Torvalds 1424a67c8caaSMiaohe Lin /* Try next first. */ 1425a67c8caaSMiaohe Lin if (vma->vm_next) { 1426a67c8caaSMiaohe Lin anon_vma = reusable_anon_vma(vma->vm_next, vma, vma->vm_next); 1427d0e9fe17SLinus Torvalds if (anon_vma) 1428d0e9fe17SLinus Torvalds return anon_vma; 1429a67c8caaSMiaohe Lin } 14301da177e4SLinus Torvalds 1431a67c8caaSMiaohe Lin /* Try prev next. */ 1432a67c8caaSMiaohe Lin if (vma->vm_prev) 1433a67c8caaSMiaohe Lin anon_vma = reusable_anon_vma(vma->vm_prev, vma->vm_prev, vma); 1434a67c8caaSMiaohe Lin 14351da177e4SLinus Torvalds /* 1436a67c8caaSMiaohe Lin * We might reach here with anon_vma == NULL if we can't find 1437a67c8caaSMiaohe Lin * any reusable anon_vma. 14381da177e4SLinus Torvalds * There's no absolute need to look only at touching neighbours: 14391da177e4SLinus Torvalds * we could search further afield for "compatible" anon_vmas. 14401da177e4SLinus Torvalds * But it would probably just be a waste of time searching, 14411da177e4SLinus Torvalds * or lead to too many vmas hanging off the same anon_vma. 14421da177e4SLinus Torvalds * We're trying to allow mprotect remerging later on, 14431da177e4SLinus Torvalds * not trying to minimize memory used for anon_vmas. 
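 *
 * The sequence this reuse is aimed at, as a hedged illustration (not
 * from the original comment): mprotect() splits an anonymous vma, a
 * later write fault on the new piece reaches anon_vma_prepare(), which
 * asks here for a neighbour's anon_vma, so that a final mprotect()
 * restoring the old protection can still remerge the pieces.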
14441da177e4SLinus Torvalds */ 1445a67c8caaSMiaohe Lin return anon_vma; 14461da177e4SLinus Torvalds } 14471da177e4SLinus Torvalds 14481da177e4SLinus Torvalds /* 144940401530SAl Viro * If a hint addr is less than mmap_min_addr change hint to be as 145040401530SAl Viro * low as possible but still greater than mmap_min_addr 145140401530SAl Viro */ 145240401530SAl Viro static inline unsigned long round_hint_to_min(unsigned long hint) 145340401530SAl Viro { 145440401530SAl Viro hint &= PAGE_MASK; 145540401530SAl Viro if (((void *)hint != NULL) && 145640401530SAl Viro (hint < mmap_min_addr)) 145740401530SAl Viro return PAGE_ALIGN(mmap_min_addr); 145840401530SAl Viro return hint; 145940401530SAl Viro } 146040401530SAl Viro 14616aeb2542SMike Rapoport int mlock_future_check(struct mm_struct *mm, unsigned long flags, 1462363ee17fSDavidlohr Bueso unsigned long len) 1463363ee17fSDavidlohr Bueso { 1464363ee17fSDavidlohr Bueso unsigned long locked, lock_limit; 1465363ee17fSDavidlohr Bueso 1466363ee17fSDavidlohr Bueso /* mlock MCL_FUTURE? */ 1467363ee17fSDavidlohr Bueso if (flags & VM_LOCKED) { 1468363ee17fSDavidlohr Bueso locked = len >> PAGE_SHIFT; 1469363ee17fSDavidlohr Bueso locked += mm->locked_vm; 1470363ee17fSDavidlohr Bueso lock_limit = rlimit(RLIMIT_MEMLOCK); 1471363ee17fSDavidlohr Bueso lock_limit >>= PAGE_SHIFT; 1472363ee17fSDavidlohr Bueso if (locked > lock_limit && !capable(CAP_IPC_LOCK)) 1473363ee17fSDavidlohr Bueso return -EAGAIN; 1474363ee17fSDavidlohr Bueso } 1475363ee17fSDavidlohr Bueso return 0; 1476363ee17fSDavidlohr Bueso } 1477363ee17fSDavidlohr Bueso 1478be83bbf8SLinus Torvalds static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) 1479be83bbf8SLinus Torvalds { 1480be83bbf8SLinus Torvalds if (S_ISREG(inode->i_mode)) 1481423913adSLinus Torvalds return MAX_LFS_FILESIZE; 1482be83bbf8SLinus Torvalds 1483be83bbf8SLinus Torvalds if (S_ISBLK(inode->i_mode)) 1484be83bbf8SLinus Torvalds return MAX_LFS_FILESIZE; 1485be83bbf8SLinus Torvalds 148676f34950SIvan Khoronzhuk if (S_ISSOCK(inode->i_mode)) 148776f34950SIvan Khoronzhuk return MAX_LFS_FILESIZE; 148876f34950SIvan Khoronzhuk 1489be83bbf8SLinus Torvalds /* Special "we do even unsigned file positions" case */ 1490be83bbf8SLinus Torvalds if (file->f_mode & FMODE_UNSIGNED_OFFSET) 1491be83bbf8SLinus Torvalds return 0; 1492be83bbf8SLinus Torvalds 1493be83bbf8SLinus Torvalds /* Yes, random drivers might want more. But I'm tired of buggy drivers */ 1494be83bbf8SLinus Torvalds return ULONG_MAX; 1495be83bbf8SLinus Torvalds } 1496be83bbf8SLinus Torvalds 1497be83bbf8SLinus Torvalds static inline bool file_mmap_ok(struct file *file, struct inode *inode, 1498be83bbf8SLinus Torvalds unsigned long pgoff, unsigned long len) 1499be83bbf8SLinus Torvalds { 1500be83bbf8SLinus Torvalds u64 maxsize = file_mmap_size_max(file, inode); 1501be83bbf8SLinus Torvalds 1502be83bbf8SLinus Torvalds if (maxsize && len > maxsize) 1503be83bbf8SLinus Torvalds return false; 1504be83bbf8SLinus Torvalds maxsize -= len; 1505be83bbf8SLinus Torvalds if (pgoff > maxsize >> PAGE_SHIFT) 1506be83bbf8SLinus Torvalds return false; 1507be83bbf8SLinus Torvalds return true; 1508be83bbf8SLinus Torvalds } 1509be83bbf8SLinus Torvalds 151040401530SAl Viro /* 15113e4e28c5SMichel Lespinasse * The caller must write-lock current->mm->mmap_lock. 
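 *
 * Minimal caller sketch, hedged (in the spirit of vm_mmap_pgoff(), not
 * a verbatim copy of it):
 *
 *	if (mmap_write_lock_killable(mm))
 *		return -EINTR;
 *	ret = do_mmap(file, addr, len, prot, flags, pgoff, &populate, &uf);
 *	mmap_write_unlock(mm);
 *	if (populate)
 *		mm_populate(ret, populate);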
15121da177e4SLinus Torvalds */
15131fcfd8dbSOleg Nesterov unsigned long do_mmap(struct file *file, unsigned long addr,
15141da177e4SLinus Torvalds unsigned long len, unsigned long prot,
151545e55300SPeter Collingbourne unsigned long flags, unsigned long pgoff,
151645e55300SPeter Collingbourne unsigned long *populate, struct list_head *uf)
15171da177e4SLinus Torvalds {
15181da177e4SLinus Torvalds struct mm_struct *mm = current->mm;
151945e55300SPeter Collingbourne vm_flags_t vm_flags;
152062b5f7d0SDave Hansen int pkey = 0;
15211da177e4SLinus Torvalds
152241badc15SMichel Lespinasse *populate = 0;
1523bebeb3d6SMichel Lespinasse
1524e37609bbSPiotr Kwapulinski if (!len)
1525e37609bbSPiotr Kwapulinski return -EINVAL;
1526e37609bbSPiotr Kwapulinski
15271da177e4SLinus Torvalds /*
15281da177e4SLinus Torvalds * Does the application expect PROT_READ to imply PROT_EXEC?
15291da177e4SLinus Torvalds *
15301da177e4SLinus Torvalds * (the exception is when the underlying filesystem is noexec
15311da177e4SLinus Torvalds * mounted, in which case we don't add PROT_EXEC.)
15321da177e4SLinus Torvalds */
15331da177e4SLinus Torvalds if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
153490f8572bSEric W. Biederman if (!(file && path_noexec(&file->f_path)))
15351da177e4SLinus Torvalds prot |= PROT_EXEC;
15361da177e4SLinus Torvalds
1537a4ff8e86SMichal Hocko /* force arch specific MAP_FIXED handling in get_unmapped_area */
1538a4ff8e86SMichal Hocko if (flags & MAP_FIXED_NOREPLACE)
1539a4ff8e86SMichal Hocko flags |= MAP_FIXED;
1540a4ff8e86SMichal Hocko
15417cd94146SEric Paris if (!(flags & MAP_FIXED))
15427cd94146SEric Paris addr = round_hint_to_min(addr);
15437cd94146SEric Paris
15441da177e4SLinus Torvalds /* Careful about overflows... */
15451da177e4SLinus Torvalds len = PAGE_ALIGN(len);
15469206de95SAl Viro if (!len)
15471da177e4SLinus Torvalds return -ENOMEM;
15481da177e4SLinus Torvalds
15491da177e4SLinus Torvalds /* offset overflow? */
15501da177e4SLinus Torvalds if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
15511da177e4SLinus Torvalds return -EOVERFLOW;
15521da177e4SLinus Torvalds
15531da177e4SLinus Torvalds /* Too many mappings? */
15541da177e4SLinus Torvalds if (mm->map_count > sysctl_max_map_count)
15551da177e4SLinus Torvalds return -ENOMEM;
15561da177e4SLinus Torvalds
15571da177e4SLinus Torvalds /* Obtain the address to map to. We verify (or select) it and ensure
15581da177e4SLinus Torvalds * that it represents a valid section of the address space.
15591da177e4SLinus Torvalds */
15601da177e4SLinus Torvalds addr = get_unmapped_area(file, addr, len, pgoff, flags);
1561ff68dac6SGaowei Pu if (IS_ERR_VALUE(addr))
15621da177e4SLinus Torvalds return addr;
15631da177e4SLinus Torvalds
1564a4ff8e86SMichal Hocko if (flags & MAP_FIXED_NOREPLACE) {
156535e43c5fSLiam Howlett if (find_vma_intersection(mm, addr, addr + len))
1566a4ff8e86SMichal Hocko return -EEXIST;
1567a4ff8e86SMichal Hocko }
1568a4ff8e86SMichal Hocko
156962b5f7d0SDave Hansen if (prot == PROT_EXEC) {
157062b5f7d0SDave Hansen pkey = execute_only_pkey(mm);
157162b5f7d0SDave Hansen if (pkey < 0)
157262b5f7d0SDave Hansen pkey = 0;
157362b5f7d0SDave Hansen }
157462b5f7d0SDave Hansen
15751da177e4SLinus Torvalds /* Do simple checking here so the lower-level routines won't have
15761da177e4SLinus Torvalds * to. We assume access permissions have been handled by the open
15771da177e4SLinus Torvalds * of the memory object, so we don't do any here.
15781da177e4SLinus Torvalds */ 157945e55300SPeter Collingbourne vm_flags = calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | 15801da177e4SLinus Torvalds mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; 15811da177e4SLinus Torvalds 1582cdf7b341SHuang Shijie if (flags & MAP_LOCKED) 15831da177e4SLinus Torvalds if (!can_do_mlock()) 15841da177e4SLinus Torvalds return -EPERM; 1585ba470de4SRik van Riel 1586363ee17fSDavidlohr Bueso if (mlock_future_check(mm, vm_flags, len)) 15871da177e4SLinus Torvalds return -EAGAIN; 15881da177e4SLinus Torvalds 15891da177e4SLinus Torvalds if (file) { 1590077bf22bSOleg Nesterov struct inode *inode = file_inode(file); 15911c972597SDan Williams unsigned long flags_mask; 15921c972597SDan Williams 1593be83bbf8SLinus Torvalds if (!file_mmap_ok(file, inode, pgoff, len)) 1594be83bbf8SLinus Torvalds return -EOVERFLOW; 1595be83bbf8SLinus Torvalds 15961c972597SDan Williams flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags; 1597077bf22bSOleg Nesterov 15981da177e4SLinus Torvalds switch (flags & MAP_TYPE) { 15991da177e4SLinus Torvalds case MAP_SHARED: 16001c972597SDan Williams /* 16011c972597SDan Williams * Force use of MAP_SHARED_VALIDATE with non-legacy 16021c972597SDan Williams * flags. E.g. MAP_SYNC is dangerous to use with 16031c972597SDan Williams * MAP_SHARED as you don't know which consistency model 16041c972597SDan Williams * you will get. We silently ignore unsupported flags 16051c972597SDan Williams * with MAP_SHARED to preserve backward compatibility. 16061c972597SDan Williams */ 16071c972597SDan Williams flags &= LEGACY_MAP_MASK; 1608e4a9bc58SJoe Perches fallthrough; 16091c972597SDan Williams case MAP_SHARED_VALIDATE: 16101c972597SDan Williams if (flags & ~flags_mask) 16111c972597SDan Williams return -EOPNOTSUPP; 1612dc617f29SDarrick J. Wong if (prot & PROT_WRITE) { 1613dc617f29SDarrick J. Wong if (!(file->f_mode & FMODE_WRITE)) 16141da177e4SLinus Torvalds return -EACCES; 1615dc617f29SDarrick J. Wong if (IS_SWAPFILE(file->f_mapping->host)) 1616dc617f29SDarrick J. Wong return -ETXTBSY; 1617dc617f29SDarrick J. Wong } 16181da177e4SLinus Torvalds 16191da177e4SLinus Torvalds /* 16201da177e4SLinus Torvalds * Make sure we don't allow writing to an append-only 16211da177e4SLinus Torvalds * file.. 16221da177e4SLinus Torvalds */ 16231da177e4SLinus Torvalds if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) 16241da177e4SLinus Torvalds return -EACCES; 16251da177e4SLinus Torvalds 16261da177e4SLinus Torvalds vm_flags |= VM_SHARED | VM_MAYSHARE; 16271da177e4SLinus Torvalds if (!(file->f_mode & FMODE_WRITE)) 16281da177e4SLinus Torvalds vm_flags &= ~(VM_MAYWRITE | VM_SHARED); 1629e4a9bc58SJoe Perches fallthrough; 16301da177e4SLinus Torvalds case MAP_PRIVATE: 16311da177e4SLinus Torvalds if (!(file->f_mode & FMODE_READ)) 16321da177e4SLinus Torvalds return -EACCES; 163390f8572bSEric W. 
Biederman if (path_noexec(&file->f_path)) { 163480c5606cSLinus Torvalds if (vm_flags & VM_EXEC) 163580c5606cSLinus Torvalds return -EPERM; 163680c5606cSLinus Torvalds vm_flags &= ~VM_MAYEXEC; 163780c5606cSLinus Torvalds } 163880c5606cSLinus Torvalds 163972c2d531SAl Viro if (!file->f_op->mmap) 164080c5606cSLinus Torvalds return -ENODEV; 1641b2c56e4fSOleg Nesterov if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) 1642b2c56e4fSOleg Nesterov return -EINVAL; 16431da177e4SLinus Torvalds break; 16441da177e4SLinus Torvalds 16451da177e4SLinus Torvalds default: 16461da177e4SLinus Torvalds return -EINVAL; 16471da177e4SLinus Torvalds } 16481da177e4SLinus Torvalds } else { 16491da177e4SLinus Torvalds switch (flags & MAP_TYPE) { 16501da177e4SLinus Torvalds case MAP_SHARED: 1651b2c56e4fSOleg Nesterov if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) 1652b2c56e4fSOleg Nesterov return -EINVAL; 1653ce363942STejun Heo /* 1654ce363942STejun Heo * Ignore pgoff. 1655ce363942STejun Heo */ 1656ce363942STejun Heo pgoff = 0; 16571da177e4SLinus Torvalds vm_flags |= VM_SHARED | VM_MAYSHARE; 16581da177e4SLinus Torvalds break; 16591da177e4SLinus Torvalds case MAP_PRIVATE: 16601da177e4SLinus Torvalds /* 16611da177e4SLinus Torvalds * Set pgoff according to addr for anon_vma. 16621da177e4SLinus Torvalds */ 16631da177e4SLinus Torvalds pgoff = addr >> PAGE_SHIFT; 16641da177e4SLinus Torvalds break; 16651da177e4SLinus Torvalds default: 16661da177e4SLinus Torvalds return -EINVAL; 16671da177e4SLinus Torvalds } 16681da177e4SLinus Torvalds } 16691da177e4SLinus Torvalds 1670c22c0d63SMichel Lespinasse /* 1671c22c0d63SMichel Lespinasse * Set 'VM_NORESERVE' if we should not account for the 1672c22c0d63SMichel Lespinasse * memory use of this mapping. 1673c22c0d63SMichel Lespinasse */ 1674c22c0d63SMichel Lespinasse if (flags & MAP_NORESERVE) { 1675c22c0d63SMichel Lespinasse /* We honor MAP_NORESERVE if allowed to overcommit */ 1676c22c0d63SMichel Lespinasse if (sysctl_overcommit_memory != OVERCOMMIT_NEVER) 1677c22c0d63SMichel Lespinasse vm_flags |= VM_NORESERVE; 1678c22c0d63SMichel Lespinasse 1679c22c0d63SMichel Lespinasse /* hugetlb applies strict overcommit unless MAP_NORESERVE */ 1680c22c0d63SMichel Lespinasse if (file && is_file_hugepages(file)) 1681c22c0d63SMichel Lespinasse vm_flags |= VM_NORESERVE; 1682c22c0d63SMichel Lespinasse } 1683c22c0d63SMichel Lespinasse 1684897ab3e0SMike Rapoport addr = mmap_region(file, addr, len, vm_flags, pgoff, uf); 168509a9f1d2SMichel Lespinasse if (!IS_ERR_VALUE(addr) && 168609a9f1d2SMichel Lespinasse ((vm_flags & VM_LOCKED) || 168709a9f1d2SMichel Lespinasse (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) 168841badc15SMichel Lespinasse *populate = len; 1689bebeb3d6SMichel Lespinasse return addr; 16900165ab44SMiklos Szeredi } 16916be5ceb0SLinus Torvalds 1692a90f590aSDominik Brodowski unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, 1693a90f590aSDominik Brodowski unsigned long prot, unsigned long flags, 1694a90f590aSDominik Brodowski unsigned long fd, unsigned long pgoff) 169566f0dc48SHugh Dickins { 169666f0dc48SHugh Dickins struct file *file = NULL; 16971e3ee14bSChen Gang unsigned long retval; 169866f0dc48SHugh Dickins 169966f0dc48SHugh Dickins if (!(flags & MAP_ANONYMOUS)) { 1700120a795dSAl Viro audit_mmap_fd(fd, flags); 170166f0dc48SHugh Dickins file = fget(fd); 170266f0dc48SHugh Dickins if (!file) 17031e3ee14bSChen Gang return -EBADF; 17047bba8f0eSZhen Lei if (is_file_hugepages(file)) { 1705af73e4d9SNaoya Horiguchi len = ALIGN(len, huge_page_size(hstate_file(file))); 
17067bba8f0eSZhen Lei } else if (unlikely(flags & MAP_HUGETLB)) { 1707493af578SJörn Engel retval = -EINVAL; 1708493af578SJörn Engel goto out_fput; 17097bba8f0eSZhen Lei } 171066f0dc48SHugh Dickins } else if (flags & MAP_HUGETLB) { 1711c103a4dcSAndrew Morton struct hstate *hs; 1712af73e4d9SNaoya Horiguchi 171320ac2893SAnshuman Khandual hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); 1714091d0d55SLi Zefan if (!hs) 1715091d0d55SLi Zefan return -EINVAL; 1716091d0d55SLi Zefan 1717091d0d55SLi Zefan len = ALIGN(len, huge_page_size(hs)); 171866f0dc48SHugh Dickins /* 171966f0dc48SHugh Dickins * VM_NORESERVE is used because the reservations will be 172066f0dc48SHugh Dickins * taken when vm_ops->mmap() is called 172166f0dc48SHugh Dickins */ 1722af73e4d9SNaoya Horiguchi file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, 172342d7395fSAndi Kleen VM_NORESERVE, 172483c1fd76Szhangyiru HUGETLB_ANONHUGE_INODE, 172542d7395fSAndi Kleen (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); 172666f0dc48SHugh Dickins if (IS_ERR(file)) 172766f0dc48SHugh Dickins return PTR_ERR(file); 172866f0dc48SHugh Dickins } 172966f0dc48SHugh Dickins 17309fbeb5abSMichal Hocko retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); 1731493af578SJörn Engel out_fput: 173266f0dc48SHugh Dickins if (file) 173366f0dc48SHugh Dickins fput(file); 173466f0dc48SHugh Dickins return retval; 173566f0dc48SHugh Dickins } 173666f0dc48SHugh Dickins 1737a90f590aSDominik Brodowski SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, 1738a90f590aSDominik Brodowski unsigned long, prot, unsigned long, flags, 1739a90f590aSDominik Brodowski unsigned long, fd, unsigned long, pgoff) 1740a90f590aSDominik Brodowski { 1741a90f590aSDominik Brodowski return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); 1742a90f590aSDominik Brodowski } 1743a90f590aSDominik Brodowski 1744a4679373SChristoph Hellwig #ifdef __ARCH_WANT_SYS_OLD_MMAP 1745a4679373SChristoph Hellwig struct mmap_arg_struct { 1746a4679373SChristoph Hellwig unsigned long addr; 1747a4679373SChristoph Hellwig unsigned long len; 1748a4679373SChristoph Hellwig unsigned long prot; 1749a4679373SChristoph Hellwig unsigned long flags; 1750a4679373SChristoph Hellwig unsigned long fd; 1751a4679373SChristoph Hellwig unsigned long offset; 1752a4679373SChristoph Hellwig }; 1753a4679373SChristoph Hellwig 1754a4679373SChristoph Hellwig SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) 1755a4679373SChristoph Hellwig { 1756a4679373SChristoph Hellwig struct mmap_arg_struct a; 1757a4679373SChristoph Hellwig 1758a4679373SChristoph Hellwig if (copy_from_user(&a, arg, sizeof(a))) 1759a4679373SChristoph Hellwig return -EFAULT; 1760de1741a1SAlexander Kuleshov if (offset_in_page(a.offset)) 1761a4679373SChristoph Hellwig return -EINVAL; 1762a4679373SChristoph Hellwig 1763a90f590aSDominik Brodowski return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, 1764a4679373SChristoph Hellwig a.offset >> PAGE_SHIFT); 1765a4679373SChristoph Hellwig } 1766a4679373SChristoph Hellwig #endif /* __ARCH_WANT_SYS_OLD_MMAP */ 1767a4679373SChristoph Hellwig 17684e950f6fSAlexey Dobriyan /* 17698bb4e7a2SWei Yang * Some shared mappings will want the pages marked read-only 17704e950f6fSAlexey Dobriyan * to track write events. If so, we'll downgrade vm_page_prot 17714e950f6fSAlexey Dobriyan * to the private version (using protection_map[] without the 17724e950f6fSAlexey Dobriyan * VM_SHARED bit). 
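 *
 * Observable effect, as a hedged sketch (not from the original
 * comment): for a MAP_SHARED, PROT_READ|PROT_WRITE mapping of a file
 * whose vm_ops implement ->page_mkwrite, the ptes start out
 * write-protected, so the first store to each clean page faults and
 * lets the filesystem mark the page dirty before the pte is made
 * writable:
 *
 *	p = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
 *	p[0] = 1;	first write faults into ->page_mkwrite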
17734e950f6fSAlexey Dobriyan */ 17746d2329f8SAndrea Arcangeli int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) 17754e950f6fSAlexey Dobriyan { 1776ca16d140SKOSAKI Motohiro vm_flags_t vm_flags = vma->vm_flags; 17778a04446aSKirill A. Shutemov const struct vm_operations_struct *vm_ops = vma->vm_ops; 17784e950f6fSAlexey Dobriyan 17794e950f6fSAlexey Dobriyan /* If it was private or non-writable, the write bit is already clear */ 17804e950f6fSAlexey Dobriyan if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) 17814e950f6fSAlexey Dobriyan return 0; 17824e950f6fSAlexey Dobriyan 17834e950f6fSAlexey Dobriyan /* The backer wishes to know when pages are first written to? */ 17848a04446aSKirill A. Shutemov if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite)) 17854e950f6fSAlexey Dobriyan return 1; 17864e950f6fSAlexey Dobriyan 178764e45507SPeter Feiner /* The open routine did something to the protections that pgprot_modify 178864e45507SPeter Feiner * won't preserve? */ 17896d2329f8SAndrea Arcangeli if (pgprot_val(vm_page_prot) != 17906d2329f8SAndrea Arcangeli pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags))) 17914e950f6fSAlexey Dobriyan return 0; 17924e950f6fSAlexey Dobriyan 1793f96f7a40SDavid Hildenbrand /* 1794f96f7a40SDavid Hildenbrand * Do we need to track softdirty? hugetlb does not support softdirty 1795f96f7a40SDavid Hildenbrand * tracking yet. 1796f96f7a40SDavid Hildenbrand */ 1797f96f7a40SDavid Hildenbrand if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma)) 179864e45507SPeter Feiner return 1; 179964e45507SPeter Feiner 18004e950f6fSAlexey Dobriyan /* Specialty mapping? */ 18014b6e1e37SKonstantin Khlebnikov if (vm_flags & VM_PFNMAP) 18024e950f6fSAlexey Dobriyan return 0; 18034e950f6fSAlexey Dobriyan 18044e950f6fSAlexey Dobriyan /* Can the mapping track the dirty pages? */ 18054e950f6fSAlexey Dobriyan return vma->vm_file && vma->vm_file->f_mapping && 1806f56753acSChristoph Hellwig mapping_can_writeback(vma->vm_file->f_mapping); 18074e950f6fSAlexey Dobriyan } 18084e950f6fSAlexey Dobriyan 1809fc8744adSLinus Torvalds /* 1810fc8744adSLinus Torvalds * We account for memory if it's a private writeable mapping, 18115a6fe125SMel Gorman * not hugepages and VM_NORESERVE wasn't set. 1812fc8744adSLinus Torvalds */ 1813ca16d140SKOSAKI Motohiro static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags) 1814fc8744adSLinus Torvalds { 18155a6fe125SMel Gorman /* 18165a6fe125SMel Gorman * hugetlb has its own accounting separate from the core VM 18175a6fe125SMel Gorman * VM_HUGETLB may not be set yet so we cannot check for that flag. 18185a6fe125SMel Gorman */ 18195a6fe125SMel Gorman if (file && is_file_hugepages(file)) 18205a6fe125SMel Gorman return 0; 18215a6fe125SMel Gorman 1822fc8744adSLinus Torvalds return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE; 1823fc8744adSLinus Torvalds } 1824fc8744adSLinus Torvalds 18250165ab44SMiklos Szeredi unsigned long mmap_region(struct file *file, unsigned long addr, 1826897ab3e0SMike Rapoport unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, 1827897ab3e0SMike Rapoport struct list_head *uf) 18280165ab44SMiklos Szeredi { 18290165ab44SMiklos Szeredi struct mm_struct *mm = current->mm; 1830d70cec89SMiaohe Lin struct vm_area_struct *vma, *prev, *merge; 18310165ab44SMiklos Szeredi int error; 18320165ab44SMiklos Szeredi struct rb_node **rb_link, *rb_parent; 18330165ab44SMiklos Szeredi unsigned long charged = 0; 18340165ab44SMiklos Szeredi 1835d4af56c5SLiam R. 
Howlett validate_mm_mt(mm); 1836e8420a8eSCyril Hrubis /* Check against address space limit. */ 183784638335SKonstantin Khlebnikov if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) { 1838e8420a8eSCyril Hrubis unsigned long nr_pages; 1839e8420a8eSCyril Hrubis 1840e8420a8eSCyril Hrubis /* 1841e8420a8eSCyril Hrubis * MAP_FIXED may remove pages of mappings that intersects with 1842e8420a8eSCyril Hrubis * requested mapping. Account for the pages it would unmap. 1843e8420a8eSCyril Hrubis */ 1844e8420a8eSCyril Hrubis nr_pages = count_vma_pages_range(mm, addr, addr + len); 1845e8420a8eSCyril Hrubis 184684638335SKonstantin Khlebnikov if (!may_expand_vm(mm, vm_flags, 184784638335SKonstantin Khlebnikov (len >> PAGE_SHIFT) - nr_pages)) 1848e8420a8eSCyril Hrubis return -ENOMEM; 1849e8420a8eSCyril Hrubis } 1850e8420a8eSCyril Hrubis 1851fb8090b6SLiam R. Howlett /* Clear old maps, set up prev, rb_link, rb_parent, and uf */ 1852fb8090b6SLiam R. Howlett if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf)) 18531da177e4SLinus Torvalds return -ENOMEM; 1854fc8744adSLinus Torvalds /* 18551da177e4SLinus Torvalds * Private writable mapping: check memory availability 18561da177e4SLinus Torvalds */ 18575a6fe125SMel Gorman if (accountable_mapping(file, vm_flags)) { 18581da177e4SLinus Torvalds charged = len >> PAGE_SHIFT; 1859191c5424SAl Viro if (security_vm_enough_memory_mm(mm, charged)) 18601da177e4SLinus Torvalds return -ENOMEM; 18611da177e4SLinus Torvalds vm_flags |= VM_ACCOUNT; 18621da177e4SLinus Torvalds } 18631da177e4SLinus Torvalds 18641da177e4SLinus Torvalds /* 1865de33c8dbSLinus Torvalds * Can we just expand an old mapping? 18661da177e4SLinus Torvalds */ 186719a809afSAndrea Arcangeli vma = vma_merge(mm, prev, addr, addr + len, vm_flags, 18689a10064fSColin Cross NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX, NULL); 1869ba470de4SRik van Riel if (vma) 18701da177e4SLinus Torvalds goto out; 18711da177e4SLinus Torvalds 18721da177e4SLinus Torvalds /* 18731da177e4SLinus Torvalds * Determine the object being mapped and call the appropriate 18741da177e4SLinus Torvalds * specific mapper. the address has already been validated, but 18751da177e4SLinus Torvalds * not unmapped, but the maps are removed from the list. 18761da177e4SLinus Torvalds */ 1877490fc053SLinus Torvalds vma = vm_area_alloc(mm); 18781da177e4SLinus Torvalds if (!vma) { 18791da177e4SLinus Torvalds error = -ENOMEM; 18801da177e4SLinus Torvalds goto unacct_error; 18811da177e4SLinus Torvalds } 18821da177e4SLinus Torvalds 18831da177e4SLinus Torvalds vma->vm_start = addr; 18841da177e4SLinus Torvalds vma->vm_end = addr + len; 18851da177e4SLinus Torvalds vma->vm_flags = vm_flags; 18863ed75eb8SColy Li vma->vm_page_prot = vm_get_page_prot(vm_flags); 18871da177e4SLinus Torvalds vma->vm_pgoff = pgoff; 18881da177e4SLinus Torvalds 18891da177e4SLinus Torvalds if (file) { 18904bb5f5d9SDavid Herrmann if (vm_flags & VM_SHARED) { 18914bb5f5d9SDavid Herrmann error = mapping_map_writable(file->f_mapping); 18924bb5f5d9SDavid Herrmann if (error) 18938d0920bdSDavid Hildenbrand goto free_vma; 18944bb5f5d9SDavid Herrmann } 18954bb5f5d9SDavid Herrmann 1896cb0942b8SAl Viro vma->vm_file = get_file(file); 1897f74ac015SMiklos Szeredi error = call_mmap(file, vma); 18981da177e4SLinus Torvalds if (error) 18991da177e4SLinus Torvalds goto unmap_and_free_vma; 19001da177e4SLinus Torvalds 1901309d08d9SLiu Zixian /* Can addr have changed?? 
1902309d08d9SLiu Zixian *
1903309d08d9SLiu Zixian * Answer: Yes, several device drivers can do it in their
1904309d08d9SLiu Zixian * f_op->mmap method. -DaveM
1905309d08d9SLiu Zixian * Bug: If addr is changed, prev, rb_link, rb_parent should
1906309d08d9SLiu Zixian * be updated for vma_link()
1907309d08d9SLiu Zixian */
1908309d08d9SLiu Zixian WARN_ON_ONCE(addr != vma->vm_start);
1909309d08d9SLiu Zixian
1910309d08d9SLiu Zixian addr = vma->vm_start;
1911309d08d9SLiu Zixian
1912d70cec89SMiaohe Lin /* If vm_flags changed after call_mmap(), we should try to merge the
1913d70cec89SMiaohe Lin * vma again, as we may succeed this time.
1914d70cec89SMiaohe Lin */
1915d70cec89SMiaohe Lin if (unlikely(vm_flags != vma->vm_flags && prev)) {
1916d70cec89SMiaohe Lin merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags,
19179a10064fSColin Cross NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
1918d70cec89SMiaohe Lin if (merge) {
1919bc4fe4cdSMiaohe Lin /* ->mmap() can change vma->vm_file and fput the original file. So
1920bc4fe4cdSMiaohe Lin * fput vma->vm_file here, or we would add an extra fput for the file
1921bc4fe4cdSMiaohe Lin * and ultimately cause a general protection fault.
1922bc4fe4cdSMiaohe Lin */
1923bc4fe4cdSMiaohe Lin fput(vma->vm_file);
1924d70cec89SMiaohe Lin vm_area_free(vma);
1925d70cec89SMiaohe Lin vma = merge;
1926309d08d9SLiu Zixian /* Update vm_flags to pick up the change. */
1927d70cec89SMiaohe Lin vm_flags = vma->vm_flags;
1928d70cec89SMiaohe Lin goto unmap_writable;
1929d70cec89SMiaohe Lin }
1930d70cec89SMiaohe Lin }
1931d70cec89SMiaohe Lin
19321da177e4SLinus Torvalds vm_flags = vma->vm_flags;
1933f8dbf0a7SHuang Shijie } else if (vm_flags & VM_SHARED) {
1934f8dbf0a7SHuang Shijie error = shmem_zero_setup(vma);
1935f8dbf0a7SHuang Shijie if (error)
1936f8dbf0a7SHuang Shijie goto free_vma;
1937bfd40eafSKirill A. Shutemov } else {
1938bfd40eafSKirill A. Shutemov vma_set_anonymous(vma);
1939f8dbf0a7SHuang Shijie }
19401da177e4SLinus Torvalds
1941c462ac28SCatalin Marinas /* Allow architectures to sanity-check the vm_flags */
1942c462ac28SCatalin Marinas if (!arch_validate_flags(vma->vm_flags)) {
1943c462ac28SCatalin Marinas error = -EINVAL;
1944c462ac28SCatalin Marinas if (file)
1945c462ac28SCatalin Marinas goto unmap_and_free_vma;
1946c462ac28SCatalin Marinas else
1947c462ac28SCatalin Marinas goto free_vma;
1948c462ac28SCatalin Marinas }
1949c462ac28SCatalin Marinas
1950d4af56c5SLiam R. Howlett if (vma_link(mm, vma, prev, rb_link, rb_parent)) {
1951d4af56c5SLiam R. Howlett error = -ENOMEM;
1952d4af56c5SLiam R. Howlett if (file)
1953d4af56c5SLiam R. Howlett goto unmap_and_free_vma;
1954d4af56c5SLiam R. Howlett else
1955d4af56c5SLiam R. Howlett goto free_vma;
1956d4af56c5SLiam R. Howlett }
1957613bec09SYang Shi
1958613bec09SYang Shi /*
1959613bec09SYang Shi * vma_merge() already calls khugepaged_enter_vma() for the merge
1960613bec09SYang Shi * case; the call below covers the non-merge case.
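 *
 * (Exposition note, not part of the original comment: for an eligible
 * anonymous mapping this registration is what later allows khugepaged
 * to collapse the range into transparent huge pages.)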
1961613bec09SYang Shi */ 1962613bec09SYang Shi khugepaged_enter_vma(vma, vma->vm_flags); 1963613bec09SYang Shi 19644d3d5b41SOleg Nesterov /* Once vma denies write, undo our temporary denial count */ 1965d70cec89SMiaohe Lin unmap_writable: 19668d0920bdSDavid Hildenbrand if (file && vm_flags & VM_SHARED) 19674bb5f5d9SDavid Herrmann mapping_unmap_writable(file->f_mapping); 1968e8686772SOleg Nesterov file = vma->vm_file; 19691da177e4SLinus Torvalds out: 1970cdd6c482SIngo Molnar perf_event_mmap(vma); 19710a4a9391SPeter Zijlstra 197284638335SKonstantin Khlebnikov vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT); 19731da177e4SLinus Torvalds if (vm_flags & VM_LOCKED) { 1974e1fb4a08SDave Jiang if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || 1975e1fb4a08SDave Jiang is_vm_hugetlb_page(vma) || 1976e1fb4a08SDave Jiang vma == get_gate_vma(current->mm)) 1977de60f5f1SEric B Munson vma->vm_flags &= VM_LOCKED_CLEAR_MASK; 1978e1fb4a08SDave Jiang else 1979e1fb4a08SDave Jiang mm->locked_vm += (len >> PAGE_SHIFT); 1980bebeb3d6SMichel Lespinasse } 19812b144498SSrikar Dronamraju 1982c7a3a88cSOleg Nesterov if (file) 1983c7a3a88cSOleg Nesterov uprobe_mmap(vma); 19842b144498SSrikar Dronamraju 1985d9104d1cSCyrill Gorcunov /* 1986d9104d1cSCyrill Gorcunov * New (or expanded) vma always get soft dirty status. 1987d9104d1cSCyrill Gorcunov * Otherwise user-space soft-dirty page tracker won't 1988d9104d1cSCyrill Gorcunov * be able to distinguish situation when vma area unmapped, 1989d9104d1cSCyrill Gorcunov * then new mapped in-place (which must be aimed as 1990d9104d1cSCyrill Gorcunov * a completely new data area). 1991d9104d1cSCyrill Gorcunov */ 1992d9104d1cSCyrill Gorcunov vma->vm_flags |= VM_SOFTDIRTY; 1993d9104d1cSCyrill Gorcunov 199464e45507SPeter Feiner vma_set_page_prot(vma); 199564e45507SPeter Feiner 1996d4af56c5SLiam R. Howlett validate_mm_mt(mm); 19971da177e4SLinus Torvalds return addr; 19981da177e4SLinus Torvalds 19991da177e4SLinus Torvalds unmap_and_free_vma: 20001527f926SChristian König fput(vma->vm_file); 20011da177e4SLinus Torvalds vma->vm_file = NULL; 20021da177e4SLinus Torvalds 20031da177e4SLinus Torvalds /* Undo any partial mapping done by a device driver. */ 2004e0da382cSHugh Dickins unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); 20054bb5f5d9SDavid Herrmann if (vm_flags & VM_SHARED) 20064bb5f5d9SDavid Herrmann mapping_unmap_writable(file->f_mapping); 20071da177e4SLinus Torvalds free_vma: 20083928d4f5SLinus Torvalds vm_area_free(vma); 20091da177e4SLinus Torvalds unacct_error: 20101da177e4SLinus Torvalds if (charged) 20111da177e4SLinus Torvalds vm_unacct_memory(charged); 2012d4af56c5SLiam R. Howlett validate_mm_mt(mm); 20131da177e4SLinus Torvalds return error; 20141da177e4SLinus Torvalds } 20151da177e4SLinus Torvalds 2016baceaf1cSJaewon Kim static unsigned long unmapped_area(struct vm_unmapped_area_info *info) 2017db4fbfb9SMichel Lespinasse { 2018db4fbfb9SMichel Lespinasse /* 2019db4fbfb9SMichel Lespinasse * We implement the search by looking for an rbtree node that 2020db4fbfb9SMichel Lespinasse * immediately follows a suitable gap. 
That is, 2021db4fbfb9SMichel Lespinasse * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length; 2022db4fbfb9SMichel Lespinasse * - gap_end = vma->vm_start >= info->low_limit + length; 2023db4fbfb9SMichel Lespinasse * - gap_end - gap_start >= length 2024db4fbfb9SMichel Lespinasse */ 2025db4fbfb9SMichel Lespinasse 2026db4fbfb9SMichel Lespinasse struct mm_struct *mm = current->mm; 2027db4fbfb9SMichel Lespinasse struct vm_area_struct *vma; 2028db4fbfb9SMichel Lespinasse unsigned long length, low_limit, high_limit, gap_start, gap_end; 2029d4af56c5SLiam R. Howlett unsigned long gap; 2030d4af56c5SLiam R. Howlett MA_STATE(mas, &mm->mm_mt, 0, 0); 2031db4fbfb9SMichel Lespinasse 2032db4fbfb9SMichel Lespinasse /* Adjust search length to account for worst case alignment overhead */ 2033db4fbfb9SMichel Lespinasse length = info->length + info->align_mask; 2034db4fbfb9SMichel Lespinasse if (length < info->length) 2035db4fbfb9SMichel Lespinasse return -ENOMEM; 2036db4fbfb9SMichel Lespinasse 2037d4af56c5SLiam R. Howlett mas_empty_area(&mas, info->low_limit, info->high_limit - 1, 2038d4af56c5SLiam R. Howlett length); 2039d4af56c5SLiam R. Howlett gap = mas.index; 2040d4af56c5SLiam R. Howlett gap += (info->align_offset - gap) & info->align_mask; 2041d4af56c5SLiam R. Howlett 2042db4fbfb9SMichel Lespinasse /* Adjust search limits by the desired length */ 2043db4fbfb9SMichel Lespinasse if (info->high_limit < length) 2044db4fbfb9SMichel Lespinasse return -ENOMEM; 2045db4fbfb9SMichel Lespinasse high_limit = info->high_limit - length; 2046db4fbfb9SMichel Lespinasse 2047db4fbfb9SMichel Lespinasse if (info->low_limit > high_limit) 2048db4fbfb9SMichel Lespinasse return -ENOMEM; 2049db4fbfb9SMichel Lespinasse low_limit = info->low_limit + length; 2050db4fbfb9SMichel Lespinasse 2051db4fbfb9SMichel Lespinasse /* Check if rbtree root looks promising */ 2052db4fbfb9SMichel Lespinasse if (RB_EMPTY_ROOT(&mm->mm_rb)) 2053db4fbfb9SMichel Lespinasse goto check_highest; 2054db4fbfb9SMichel Lespinasse vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); 2055db4fbfb9SMichel Lespinasse if (vma->rb_subtree_gap < length) 2056db4fbfb9SMichel Lespinasse goto check_highest; 2057db4fbfb9SMichel Lespinasse 2058db4fbfb9SMichel Lespinasse while (true) { 2059db4fbfb9SMichel Lespinasse /* Visit left subtree if it looks promising */ 20601be7107fSHugh Dickins gap_end = vm_start_gap(vma); 2061db4fbfb9SMichel Lespinasse if (gap_end >= low_limit && vma->vm_rb.rb_left) { 2062db4fbfb9SMichel Lespinasse struct vm_area_struct *left = 2063db4fbfb9SMichel Lespinasse rb_entry(vma->vm_rb.rb_left, 2064db4fbfb9SMichel Lespinasse struct vm_area_struct, vm_rb); 2065db4fbfb9SMichel Lespinasse if (left->rb_subtree_gap >= length) { 2066db4fbfb9SMichel Lespinasse vma = left; 2067db4fbfb9SMichel Lespinasse continue; 2068db4fbfb9SMichel Lespinasse } 2069db4fbfb9SMichel Lespinasse } 2070db4fbfb9SMichel Lespinasse 20711be7107fSHugh Dickins gap_start = vma->vm_prev ? 
vm_end_gap(vma->vm_prev) : 0; 2072db4fbfb9SMichel Lespinasse check_current: 2073db4fbfb9SMichel Lespinasse /* Check if current node has a suitable gap */ 2074db4fbfb9SMichel Lespinasse if (gap_start > high_limit) 2075db4fbfb9SMichel Lespinasse return -ENOMEM; 2076f4cb767dSHugh Dickins if (gap_end >= low_limit && 2077f4cb767dSHugh Dickins gap_end > gap_start && gap_end - gap_start >= length) 2078db4fbfb9SMichel Lespinasse goto found; 2079db4fbfb9SMichel Lespinasse 2080db4fbfb9SMichel Lespinasse /* Visit right subtree if it looks promising */ 2081db4fbfb9SMichel Lespinasse if (vma->vm_rb.rb_right) { 2082db4fbfb9SMichel Lespinasse struct vm_area_struct *right = 2083db4fbfb9SMichel Lespinasse rb_entry(vma->vm_rb.rb_right, 2084db4fbfb9SMichel Lespinasse struct vm_area_struct, vm_rb); 2085db4fbfb9SMichel Lespinasse if (right->rb_subtree_gap >= length) { 2086db4fbfb9SMichel Lespinasse vma = right; 2087db4fbfb9SMichel Lespinasse continue; 2088db4fbfb9SMichel Lespinasse } 2089db4fbfb9SMichel Lespinasse } 2090db4fbfb9SMichel Lespinasse 2091db4fbfb9SMichel Lespinasse /* Go back up the rbtree to find next candidate node */ 2092db4fbfb9SMichel Lespinasse while (true) { 2093db4fbfb9SMichel Lespinasse struct rb_node *prev = &vma->vm_rb; 2094db4fbfb9SMichel Lespinasse if (!rb_parent(prev)) 2095db4fbfb9SMichel Lespinasse goto check_highest; 2096db4fbfb9SMichel Lespinasse vma = rb_entry(rb_parent(prev), 2097db4fbfb9SMichel Lespinasse struct vm_area_struct, vm_rb); 2098db4fbfb9SMichel Lespinasse if (prev == vma->vm_rb.rb_left) { 20991be7107fSHugh Dickins gap_start = vm_end_gap(vma->vm_prev); 21001be7107fSHugh Dickins gap_end = vm_start_gap(vma); 2101db4fbfb9SMichel Lespinasse goto check_current; 2102db4fbfb9SMichel Lespinasse } 2103db4fbfb9SMichel Lespinasse } 2104db4fbfb9SMichel Lespinasse } 2105db4fbfb9SMichel Lespinasse 2106db4fbfb9SMichel Lespinasse check_highest: 2107db4fbfb9SMichel Lespinasse /* Check highest gap, which does not precede any rbtree node */ 2108db4fbfb9SMichel Lespinasse gap_start = mm->highest_vm_end; 2109db4fbfb9SMichel Lespinasse gap_end = ULONG_MAX; /* Only for VM_BUG_ON below */ 2110db4fbfb9SMichel Lespinasse if (gap_start > high_limit) 2111db4fbfb9SMichel Lespinasse return -ENOMEM; 2112db4fbfb9SMichel Lespinasse 2113db4fbfb9SMichel Lespinasse found: 2114db4fbfb9SMichel Lespinasse /* We found a suitable gap. Clip it with the original low_limit. */ 2115db4fbfb9SMichel Lespinasse if (gap_start < info->low_limit) 2116db4fbfb9SMichel Lespinasse gap_start = info->low_limit; 2117db4fbfb9SMichel Lespinasse 2118db4fbfb9SMichel Lespinasse /* Adjust gap address to the desired alignment */ 2119db4fbfb9SMichel Lespinasse gap_start += (info->align_offset - gap_start) & info->align_mask; 2120db4fbfb9SMichel Lespinasse 2121db4fbfb9SMichel Lespinasse VM_BUG_ON(gap_start + info->length > info->high_limit); 2122db4fbfb9SMichel Lespinasse VM_BUG_ON(gap_start + info->length > gap_end); 2123d4af56c5SLiam R. Howlett 2124d4af56c5SLiam R. Howlett VM_BUG_ON(gap != gap_start); 2125db4fbfb9SMichel Lespinasse return gap_start; 2126db4fbfb9SMichel Lespinasse } 2127db4fbfb9SMichel Lespinasse 2128baceaf1cSJaewon Kim static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) 2129db4fbfb9SMichel Lespinasse { 2130db4fbfb9SMichel Lespinasse struct mm_struct *mm = current->mm; 2131d4af56c5SLiam R. Howlett struct vm_area_struct *vma = NULL; 2132db4fbfb9SMichel Lespinasse unsigned long length, low_limit, high_limit, gap_start, gap_end; 2133d4af56c5SLiam R. 
Howlett unsigned long gap; 2134d4af56c5SLiam R. Howlett 2135d4af56c5SLiam R. Howlett MA_STATE(mas, &mm->mm_mt, 0, 0); 2136d4af56c5SLiam R. Howlett validate_mm_mt(mm); 2137db4fbfb9SMichel Lespinasse 2138db4fbfb9SMichel Lespinasse /* Adjust search length to account for worst case alignment overhead */ 2139db4fbfb9SMichel Lespinasse length = info->length + info->align_mask; 2140db4fbfb9SMichel Lespinasse if (length < info->length) 2141db4fbfb9SMichel Lespinasse return -ENOMEM; 2142db4fbfb9SMichel Lespinasse 2143d4af56c5SLiam R. Howlett mas_empty_area_rev(&mas, info->low_limit, info->high_limit - 1, 2144d4af56c5SLiam R. Howlett length); 2145d4af56c5SLiam R. Howlett gap = mas.last + 1 - info->length; 2146d4af56c5SLiam R. Howlett gap -= (gap - info->align_offset) & info->align_mask; 2147d4af56c5SLiam R. Howlett 2148db4fbfb9SMichel Lespinasse /* 2149db4fbfb9SMichel Lespinasse * Adjust search limits by the desired length. 2150db4fbfb9SMichel Lespinasse * See implementation comment at top of unmapped_area(). 2151db4fbfb9SMichel Lespinasse */ 2152db4fbfb9SMichel Lespinasse gap_end = info->high_limit; 2153db4fbfb9SMichel Lespinasse if (gap_end < length) 2154db4fbfb9SMichel Lespinasse return -ENOMEM; 2155db4fbfb9SMichel Lespinasse high_limit = gap_end - length; 2156db4fbfb9SMichel Lespinasse 2157db4fbfb9SMichel Lespinasse if (info->low_limit > high_limit) 2158db4fbfb9SMichel Lespinasse return -ENOMEM; 2159db4fbfb9SMichel Lespinasse low_limit = info->low_limit + length; 2160db4fbfb9SMichel Lespinasse 2161db4fbfb9SMichel Lespinasse /* Check highest gap, which does not precede any rbtree node */ 2162db4fbfb9SMichel Lespinasse gap_start = mm->highest_vm_end; 2163db4fbfb9SMichel Lespinasse if (gap_start <= high_limit) 2164db4fbfb9SMichel Lespinasse goto found_highest; 2165db4fbfb9SMichel Lespinasse 2166db4fbfb9SMichel Lespinasse /* Check if rbtree root looks promising */ 2167db4fbfb9SMichel Lespinasse if (RB_EMPTY_ROOT(&mm->mm_rb)) 2168db4fbfb9SMichel Lespinasse return -ENOMEM; 2169db4fbfb9SMichel Lespinasse vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); 2170db4fbfb9SMichel Lespinasse if (vma->rb_subtree_gap < length) 2171db4fbfb9SMichel Lespinasse return -ENOMEM; 2172db4fbfb9SMichel Lespinasse 2173db4fbfb9SMichel Lespinasse while (true) { 2174db4fbfb9SMichel Lespinasse /* Visit right subtree if it looks promising */ 21751be7107fSHugh Dickins gap_start = vma->vm_prev ? 
vm_end_gap(vma->vm_prev) : 0; 2176db4fbfb9SMichel Lespinasse if (gap_start <= high_limit && vma->vm_rb.rb_right) { 2177db4fbfb9SMichel Lespinasse struct vm_area_struct *right = 2178db4fbfb9SMichel Lespinasse rb_entry(vma->vm_rb.rb_right, 2179db4fbfb9SMichel Lespinasse struct vm_area_struct, vm_rb); 2180db4fbfb9SMichel Lespinasse if (right->rb_subtree_gap >= length) { 2181db4fbfb9SMichel Lespinasse vma = right; 2182db4fbfb9SMichel Lespinasse continue; 2183db4fbfb9SMichel Lespinasse } 2184db4fbfb9SMichel Lespinasse } 2185db4fbfb9SMichel Lespinasse 2186db4fbfb9SMichel Lespinasse check_current: 2187db4fbfb9SMichel Lespinasse /* Check if current node has a suitable gap */ 21881be7107fSHugh Dickins gap_end = vm_start_gap(vma); 2189db4fbfb9SMichel Lespinasse if (gap_end < low_limit) 2190db4fbfb9SMichel Lespinasse return -ENOMEM; 2191f4cb767dSHugh Dickins if (gap_start <= high_limit && 2192f4cb767dSHugh Dickins gap_end > gap_start && gap_end - gap_start >= length) 2193db4fbfb9SMichel Lespinasse goto found; 2194db4fbfb9SMichel Lespinasse 2195db4fbfb9SMichel Lespinasse /* Visit left subtree if it looks promising */ 2196db4fbfb9SMichel Lespinasse if (vma->vm_rb.rb_left) { 2197db4fbfb9SMichel Lespinasse struct vm_area_struct *left = 2198db4fbfb9SMichel Lespinasse rb_entry(vma->vm_rb.rb_left, 2199db4fbfb9SMichel Lespinasse struct vm_area_struct, vm_rb); 2200db4fbfb9SMichel Lespinasse if (left->rb_subtree_gap >= length) { 2201db4fbfb9SMichel Lespinasse vma = left; 2202db4fbfb9SMichel Lespinasse continue; 2203db4fbfb9SMichel Lespinasse } 2204db4fbfb9SMichel Lespinasse } 2205db4fbfb9SMichel Lespinasse 2206db4fbfb9SMichel Lespinasse /* Go back up the rbtree to find next candidate node */ 2207db4fbfb9SMichel Lespinasse while (true) { 2208db4fbfb9SMichel Lespinasse struct rb_node *prev = &vma->vm_rb; 2209db4fbfb9SMichel Lespinasse if (!rb_parent(prev)) 2210db4fbfb9SMichel Lespinasse return -ENOMEM; 2211db4fbfb9SMichel Lespinasse vma = rb_entry(rb_parent(prev), 2212db4fbfb9SMichel Lespinasse struct vm_area_struct, vm_rb); 2213db4fbfb9SMichel Lespinasse if (prev == vma->vm_rb.rb_right) { 2214db4fbfb9SMichel Lespinasse gap_start = vma->vm_prev ? 22151be7107fSHugh Dickins vm_end_gap(vma->vm_prev) : 0; 2216db4fbfb9SMichel Lespinasse goto check_current; 2217db4fbfb9SMichel Lespinasse } 2218db4fbfb9SMichel Lespinasse } 2219db4fbfb9SMichel Lespinasse } 2220db4fbfb9SMichel Lespinasse 2221db4fbfb9SMichel Lespinasse found: 2222db4fbfb9SMichel Lespinasse /* We found a suitable gap. Clip it with the original high_limit. */ 2223db4fbfb9SMichel Lespinasse if (gap_end > info->high_limit) 2224db4fbfb9SMichel Lespinasse gap_end = info->high_limit; 2225db4fbfb9SMichel Lespinasse 2226db4fbfb9SMichel Lespinasse found_highest: 2227db4fbfb9SMichel Lespinasse /* Compute highest gap address at the desired alignment */ 2228db4fbfb9SMichel Lespinasse gap_end -= info->length; 2229db4fbfb9SMichel Lespinasse gap_end -= (gap_end - info->align_offset) & info->align_mask; 2230db4fbfb9SMichel Lespinasse 2231db4fbfb9SMichel Lespinasse VM_BUG_ON(gap_end < info->low_limit); 2232db4fbfb9SMichel Lespinasse VM_BUG_ON(gap_end < gap_start); 2233d4af56c5SLiam R. Howlett 2234d4af56c5SLiam R. Howlett if (gap != gap_end) { 2235d4af56c5SLiam R. Howlett pr_err("%s: %p Gap was found: mt %lu gap_end %lu\n", __func__, 2236d4af56c5SLiam R. Howlett mm, gap, gap_end); 2237d4af56c5SLiam R. Howlett pr_err("window was %lu - %lu size %lu\n", info->high_limit, 2238d4af56c5SLiam R. Howlett info->low_limit, length); 2239d4af56c5SLiam R. 
Howlett pr_err("mas.min %lu max %lu mas.last %lu\n", mas.min, mas.max, 2240d4af56c5SLiam R. Howlett mas.last); 2241d4af56c5SLiam R. Howlett pr_err("mas.index %lu align mask %lu offset %lu\n", mas.index, 2242d4af56c5SLiam R. Howlett info->align_mask, info->align_offset); 2243d4af56c5SLiam R. Howlett pr_err("rb_find_vma find on %lu => %p (%p)\n", mas.index, 2244d4af56c5SLiam R. Howlett find_vma(mm, mas.index), vma); 2245d4af56c5SLiam R. Howlett #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) 2246d4af56c5SLiam R. Howlett mt_dump(&mm->mm_mt); 2247d4af56c5SLiam R. Howlett #endif 2248d4af56c5SLiam R. Howlett { 2249d4af56c5SLiam R. Howlett struct vm_area_struct *dv = mm->mmap; 2250d4af56c5SLiam R. Howlett 2251d4af56c5SLiam R. Howlett while (dv) { 2252d4af56c5SLiam R. Howlett pr_err("vma %p %lu-%lu\n", dv, dv->vm_start, dv->vm_end); 2253d4af56c5SLiam R. Howlett dv = dv->vm_next; 2254d4af56c5SLiam R. Howlett } 2255d4af56c5SLiam R. Howlett } 2256d4af56c5SLiam R. Howlett VM_BUG_ON(gap != gap_end); 2257d4af56c5SLiam R. Howlett } 2258d4af56c5SLiam R. Howlett 2259db4fbfb9SMichel Lespinasse return gap_end; 2260db4fbfb9SMichel Lespinasse } 2261db4fbfb9SMichel Lespinasse 2262baceaf1cSJaewon Kim /* 2263baceaf1cSJaewon Kim * Search for an unmapped address range. 2264baceaf1cSJaewon Kim * 2265baceaf1cSJaewon Kim * We are looking for a range that: 2266baceaf1cSJaewon Kim * - does not intersect with any VMA; 2267baceaf1cSJaewon Kim * - is contained within the [low_limit, high_limit) interval; 2268baceaf1cSJaewon Kim * - is at least the desired size. 2269baceaf1cSJaewon Kim * - satisfies (begin_addr & align_mask) == (align_offset & align_mask) 2270baceaf1cSJaewon Kim */ 2271baceaf1cSJaewon Kim unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info) 2272baceaf1cSJaewon Kim { 2273df529cabSJaewon Kim unsigned long addr; 2274df529cabSJaewon Kim 2275baceaf1cSJaewon Kim if (info->flags & VM_UNMAPPED_AREA_TOPDOWN) 2276df529cabSJaewon Kim addr = unmapped_area_topdown(info); 2277baceaf1cSJaewon Kim else 2278df529cabSJaewon Kim addr = unmapped_area(info); 2279df529cabSJaewon Kim 2280df529cabSJaewon Kim trace_vm_unmapped_area(addr, info); 2281df529cabSJaewon Kim return addr; 2282baceaf1cSJaewon Kim } 2283f6795053SSteve Capper 22841da177e4SLinus Torvalds /* Get an address range which is currently unmapped. 22851da177e4SLinus Torvalds * For shmat() with addr=0. 22861da177e4SLinus Torvalds * 22871da177e4SLinus Torvalds * Ugly calling convention alert: 22881da177e4SLinus Torvalds * Return value with the low bits set means error value, 22891da177e4SLinus Torvalds * ie 22901da177e4SLinus Torvalds * if (ret & ~PAGE_MASK) 22911da177e4SLinus Torvalds * error = ret; 22921da177e4SLinus Torvalds * 22931da177e4SLinus Torvalds * This function "knows" that -ENOMEM has the bits set. 
22941da177e4SLinus Torvalds */ 22951da177e4SLinus Torvalds unsigned long 22964b439e25SChristophe Leroy generic_get_unmapped_area(struct file *filp, unsigned long addr, 22974b439e25SChristophe Leroy unsigned long len, unsigned long pgoff, 22984b439e25SChristophe Leroy unsigned long flags) 22991da177e4SLinus Torvalds { 23001da177e4SLinus Torvalds struct mm_struct *mm = current->mm; 23011be7107fSHugh Dickins struct vm_area_struct *vma, *prev; 2302db4fbfb9SMichel Lespinasse struct vm_unmapped_area_info info; 23032cb4de08SChristophe Leroy const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); 23041da177e4SLinus Torvalds 2305f6795053SSteve Capper if (len > mmap_end - mmap_min_addr) 23061da177e4SLinus Torvalds return -ENOMEM; 23071da177e4SLinus Torvalds 230806abdfb4SBenjamin Herrenschmidt if (flags & MAP_FIXED) 230906abdfb4SBenjamin Herrenschmidt return addr; 231006abdfb4SBenjamin Herrenschmidt 23111da177e4SLinus Torvalds if (addr) { 23121da177e4SLinus Torvalds addr = PAGE_ALIGN(addr); 23131be7107fSHugh Dickins vma = find_vma_prev(mm, addr, &prev); 2314f6795053SSteve Capper if (mmap_end - len >= addr && addr >= mmap_min_addr && 23151be7107fSHugh Dickins (!vma || addr + len <= vm_start_gap(vma)) && 23161be7107fSHugh Dickins (!prev || addr >= vm_end_gap(prev))) 23171da177e4SLinus Torvalds return addr; 23181da177e4SLinus Torvalds } 23191da177e4SLinus Torvalds 2320db4fbfb9SMichel Lespinasse info.flags = 0; 2321db4fbfb9SMichel Lespinasse info.length = len; 23224e99b021SHeiko Carstens info.low_limit = mm->mmap_base; 2323f6795053SSteve Capper info.high_limit = mmap_end; 2324db4fbfb9SMichel Lespinasse info.align_mask = 0; 232509ef5283SJaewon Kim info.align_offset = 0; 2326db4fbfb9SMichel Lespinasse return vm_unmapped_area(&info); 23271da177e4SLinus Torvalds } 23284b439e25SChristophe Leroy 23294b439e25SChristophe Leroy #ifndef HAVE_ARCH_UNMAPPED_AREA 23304b439e25SChristophe Leroy unsigned long 23314b439e25SChristophe Leroy arch_get_unmapped_area(struct file *filp, unsigned long addr, 23324b439e25SChristophe Leroy unsigned long len, unsigned long pgoff, 23334b439e25SChristophe Leroy unsigned long flags) 23344b439e25SChristophe Leroy { 23354b439e25SChristophe Leroy return generic_get_unmapped_area(filp, addr, len, pgoff, flags); 23364b439e25SChristophe Leroy } 23371da177e4SLinus Torvalds #endif 23381da177e4SLinus Torvalds 23391da177e4SLinus Torvalds /* 23401da177e4SLinus Torvalds * This mmap-allocator allocates new areas top-down from below the 23411da177e4SLinus Torvalds * stack's low limit (the base): 23421da177e4SLinus Torvalds */ 23431da177e4SLinus Torvalds unsigned long 23444b439e25SChristophe Leroy generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, 234543cca0b1SYang Fan unsigned long len, unsigned long pgoff, 234643cca0b1SYang Fan unsigned long flags) 23471da177e4SLinus Torvalds { 23481be7107fSHugh Dickins struct vm_area_struct *vma, *prev; 23491da177e4SLinus Torvalds struct mm_struct *mm = current->mm; 2350db4fbfb9SMichel Lespinasse struct vm_unmapped_area_info info; 23512cb4de08SChristophe Leroy const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); 23521da177e4SLinus Torvalds 23531da177e4SLinus Torvalds /* requested length too big for entire address space */ 2354f6795053SSteve Capper if (len > mmap_end - mmap_min_addr) 23551da177e4SLinus Torvalds return -ENOMEM; 23561da177e4SLinus Torvalds 235706abdfb4SBenjamin Herrenschmidt if (flags & MAP_FIXED) 235806abdfb4SBenjamin Herrenschmidt return addr; 235906abdfb4SBenjamin Herrenschmidt 23601da177e4SLinus 
Torvalds /* requesting a specific address */ 23611da177e4SLinus Torvalds if (addr) { 23621da177e4SLinus Torvalds addr = PAGE_ALIGN(addr); 23631be7107fSHugh Dickins vma = find_vma_prev(mm, addr, &prev); 2364f6795053SSteve Capper if (mmap_end - len >= addr && addr >= mmap_min_addr && 23651be7107fSHugh Dickins (!vma || addr + len <= vm_start_gap(vma)) && 23661be7107fSHugh Dickins (!prev || addr >= vm_end_gap(prev))) 23671da177e4SLinus Torvalds return addr; 23681da177e4SLinus Torvalds } 23691da177e4SLinus Torvalds 2370db4fbfb9SMichel Lespinasse info.flags = VM_UNMAPPED_AREA_TOPDOWN; 2371db4fbfb9SMichel Lespinasse info.length = len; 23722afc745fSAkira Takeuchi info.low_limit = max(PAGE_SIZE, mmap_min_addr); 2373f6795053SSteve Capper info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); 2374db4fbfb9SMichel Lespinasse info.align_mask = 0; 237509ef5283SJaewon Kim info.align_offset = 0; 2376db4fbfb9SMichel Lespinasse addr = vm_unmapped_area(&info); 2377b716ad95SXiao Guangrong 23781da177e4SLinus Torvalds /* 23791da177e4SLinus Torvalds * A failed mmap() very likely causes application failure, 23801da177e4SLinus Torvalds * so fall back to the bottom-up function here. This scenario 23811da177e4SLinus Torvalds * can happen with large stack limits and large mmap() 23821da177e4SLinus Torvalds * allocations. 23831da177e4SLinus Torvalds */ 2384de1741a1SAlexander Kuleshov if (offset_in_page(addr)) { 2385db4fbfb9SMichel Lespinasse VM_BUG_ON(addr != -ENOMEM); 2386db4fbfb9SMichel Lespinasse info.flags = 0; 2387db4fbfb9SMichel Lespinasse info.low_limit = TASK_UNMAPPED_BASE; 2388f6795053SSteve Capper info.high_limit = mmap_end; 2389db4fbfb9SMichel Lespinasse addr = vm_unmapped_area(&info); 2390db4fbfb9SMichel Lespinasse } 23911da177e4SLinus Torvalds 23921da177e4SLinus Torvalds return addr; 23931da177e4SLinus Torvalds } 23944b439e25SChristophe Leroy 23954b439e25SChristophe Leroy #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 23964b439e25SChristophe Leroy unsigned long 23974b439e25SChristophe Leroy arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, 23984b439e25SChristophe Leroy unsigned long len, unsigned long pgoff, 23994b439e25SChristophe Leroy unsigned long flags) 24004b439e25SChristophe Leroy { 24014b439e25SChristophe Leroy return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); 24024b439e25SChristophe Leroy } 24031da177e4SLinus Torvalds #endif 24041da177e4SLinus Torvalds 24051da177e4SLinus Torvalds unsigned long 24061da177e4SLinus Torvalds get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, 24071da177e4SLinus Torvalds unsigned long pgoff, unsigned long flags) 24081da177e4SLinus Torvalds { 240906abdfb4SBenjamin Herrenschmidt unsigned long (*get_area)(struct file *, unsigned long, 241006abdfb4SBenjamin Herrenschmidt unsigned long, unsigned long, unsigned long); 241107ab67c8SLinus Torvalds 24129206de95SAl Viro unsigned long error = arch_mmap_check(addr, len, flags); 24139206de95SAl Viro if (error) 24149206de95SAl Viro return error; 24159206de95SAl Viro 24169206de95SAl Viro /* Careful about overflows.. 
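 * e.g. if len were larger than TASK_SIZE, the unsigned comparison
 * "addr > TASK_SIZE - len" further down would wrap around and wrongly
 * succeed, so oversized requests are rejected up front.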
*/ 24179206de95SAl Viro if (len > TASK_SIZE) 24189206de95SAl Viro return -ENOMEM; 24199206de95SAl Viro 242007ab67c8SLinus Torvalds get_area = current->mm->get_unmapped_area; 2421c01d5b30SHugh Dickins if (file) { 2422c01d5b30SHugh Dickins if (file->f_op->get_unmapped_area) 242307ab67c8SLinus Torvalds get_area = file->f_op->get_unmapped_area; 2424c01d5b30SHugh Dickins } else if (flags & MAP_SHARED) { 2425c01d5b30SHugh Dickins /* 2426c01d5b30SHugh Dickins * mmap_region() will call shmem_zero_setup() to create a file, 2427c01d5b30SHugh Dickins * so use shmem's get_unmapped_area in case it can be huge. 242845e55300SPeter Collingbourne * do_mmap() will clear pgoff, so match alignment. 2429c01d5b30SHugh Dickins */ 2430c01d5b30SHugh Dickins pgoff = 0; 2431c01d5b30SHugh Dickins get_area = shmem_get_unmapped_area; 2432f35b5d7dSRik van Riel } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { 2433f35b5d7dSRik van Riel /* Ensures that larger anonymous mappings are THP aligned. */ 2434f35b5d7dSRik van Riel get_area = thp_get_unmapped_area; 2435c01d5b30SHugh Dickins } 2436c01d5b30SHugh Dickins 243707ab67c8SLinus Torvalds addr = get_area(file, addr, len, pgoff, flags); 243807ab67c8SLinus Torvalds if (IS_ERR_VALUE(addr)) 243907ab67c8SLinus Torvalds return addr; 244007ab67c8SLinus Torvalds 24411da177e4SLinus Torvalds if (addr > TASK_SIZE - len) 24421da177e4SLinus Torvalds return -ENOMEM; 2443de1741a1SAlexander Kuleshov if (offset_in_page(addr)) 24441da177e4SLinus Torvalds return -EINVAL; 244506abdfb4SBenjamin Herrenschmidt 24469ac4ed4bSAl Viro error = security_mmap_addr(addr); 24479ac4ed4bSAl Viro return error ? error : addr; 24481da177e4SLinus Torvalds } 24491da177e4SLinus Torvalds 24501da177e4SLinus Torvalds EXPORT_SYMBOL(get_unmapped_area); 24511da177e4SLinus Torvalds 24521da177e4SLinus Torvalds /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ 24531da177e4SLinus Torvalds struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) 24541da177e4SLinus Torvalds { 2455615d6e87SDavidlohr Bueso struct rb_node *rb_node; 2456615d6e87SDavidlohr Bueso struct vm_area_struct *vma; 24571da177e4SLinus Torvalds 24585b78ed24SLuigi Rizzo mmap_assert_locked(mm); 24591da177e4SLinus Torvalds /* Check the cache first. */ 2460615d6e87SDavidlohr Bueso vma = vmacache_find(mm, addr); 2461615d6e87SDavidlohr Bueso if (likely(vma)) 2462615d6e87SDavidlohr Bueso return vma; 24631da177e4SLinus Torvalds 24641da177e4SLinus Torvalds rb_node = mm->mm_rb.rb_node; 24651da177e4SLinus Torvalds 24661da177e4SLinus Torvalds while (rb_node) { 2467615d6e87SDavidlohr Bueso struct vm_area_struct *tmp; 24681da177e4SLinus Torvalds 2469615d6e87SDavidlohr Bueso tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb); 24701da177e4SLinus Torvalds 2471615d6e87SDavidlohr Bueso if (tmp->vm_end > addr) { 2472615d6e87SDavidlohr Bueso vma = tmp; 2473615d6e87SDavidlohr Bueso if (tmp->vm_start <= addr) 24741da177e4SLinus Torvalds break; 24751da177e4SLinus Torvalds rb_node = rb_node->rb_left; 24761da177e4SLinus Torvalds } else 24771da177e4SLinus Torvalds rb_node = rb_node->rb_right; 24781da177e4SLinus Torvalds } 2479615d6e87SDavidlohr Bueso 24801da177e4SLinus Torvalds if (vma) 2481615d6e87SDavidlohr Bueso vmacache_update(addr, vma); 24821da177e4SLinus Torvalds return vma; 24831da177e4SLinus Torvalds } 24841da177e4SLinus Torvalds EXPORT_SYMBOL(find_vma); 24851da177e4SLinus Torvalds 24866bd4837dSKOSAKI Motohiro /* 24876bd4837dSKOSAKI Motohiro * Same as find_vma, but also return a pointer to the previous VMA in *pprev. 
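 * If no VMA contains or follows @addr, NULL is returned and *pprev is
 * set to the last VMA in the address space (or NULL when the mm has no
 * VMAs at all), as implemented by the rb_last() fallback below.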
24886bd4837dSKOSAKI Motohiro */ 24891da177e4SLinus Torvalds struct vm_area_struct * 24901da177e4SLinus Torvalds find_vma_prev(struct mm_struct *mm, unsigned long addr, 24911da177e4SLinus Torvalds struct vm_area_struct **pprev) 24921da177e4SLinus Torvalds { 24936bd4837dSKOSAKI Motohiro struct vm_area_struct *vma; 24941da177e4SLinus Torvalds 24956bd4837dSKOSAKI Motohiro vma = find_vma(mm, addr); 249683cd904dSMikulas Patocka if (vma) { 249783cd904dSMikulas Patocka *pprev = vma->vm_prev; 249883cd904dSMikulas Patocka } else { 249973848a97SWei Yang struct rb_node *rb_node = rb_last(&mm->mm_rb); 250073848a97SWei Yang 250173848a97SWei Yang *pprev = rb_node ? rb_entry(rb_node, struct vm_area_struct, vm_rb) : NULL; 250283cd904dSMikulas Patocka } 25036bd4837dSKOSAKI Motohiro return vma; 25041da177e4SLinus Torvalds } 25051da177e4SLinus Torvalds 25061da177e4SLinus Torvalds /* 25071da177e4SLinus Torvalds * Verify that the stack growth is acceptable and 25081da177e4SLinus Torvalds * update accounting. This is shared with both the 25091da177e4SLinus Torvalds * grow-up and grow-down cases. 25101da177e4SLinus Torvalds */ 25111be7107fSHugh Dickins static int acct_stack_growth(struct vm_area_struct *vma, 25121be7107fSHugh Dickins unsigned long size, unsigned long grow) 25131da177e4SLinus Torvalds { 25141da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 25151be7107fSHugh Dickins unsigned long new_start; 25161da177e4SLinus Torvalds 25171da177e4SLinus Torvalds /* address space limit tests */ 251884638335SKonstantin Khlebnikov if (!may_expand_vm(mm, vma->vm_flags, grow)) 25191da177e4SLinus Torvalds return -ENOMEM; 25201da177e4SLinus Torvalds 25211da177e4SLinus Torvalds /* Stack limit test */ 252224c79d8eSKrzysztof Opasiak if (size > rlimit(RLIMIT_STACK)) 25231da177e4SLinus Torvalds return -ENOMEM; 25241da177e4SLinus Torvalds 25251da177e4SLinus Torvalds /* mlock limit tests */ 2526c5d8a364SMiaohe Lin if (mlock_future_check(mm, vma->vm_flags, grow << PAGE_SHIFT)) 25271da177e4SLinus Torvalds return -ENOMEM; 25281da177e4SLinus Torvalds 25290d59a01bSAdam Litke /* Check to ensure the stack will not grow into a hugetlb-only region */ 25300d59a01bSAdam Litke new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : 25310d59a01bSAdam Litke vma->vm_end - size; 25320d59a01bSAdam Litke if (is_hugepage_only_range(vma->vm_mm, new_start, size)) 25330d59a01bSAdam Litke return -EFAULT; 25340d59a01bSAdam Litke 25351da177e4SLinus Torvalds /* 25361da177e4SLinus Torvalds * Overcommit.. This must be the final test, as it will 25371da177e4SLinus Torvalds * update security statistics. 25381da177e4SLinus Torvalds */ 253905fa199dSHugh Dickins if (security_vm_enough_memory_mm(mm, grow)) 25401da177e4SLinus Torvalds return -ENOMEM; 25411da177e4SLinus Torvalds 25421da177e4SLinus Torvalds return 0; 25431da177e4SLinus Torvalds } 25441da177e4SLinus Torvalds 254546dea3d0SHugh Dickins #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) 25461da177e4SLinus Torvalds /* 254746dea3d0SHugh Dickins * PA-RISC uses this for its stack; IA64 for its Register Backing Store. 254846dea3d0SHugh Dickins * vma is the last one with address > vma->vm_end. Have to extend vma. 
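 * Called with the mmap_lock held for read, e.g. via expand_stack()
 * from find_extend_vma() below, when a faulting address lies just
 * above a VM_GROWSUP stack segment.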
25491da177e4SLinus Torvalds */ 255046dea3d0SHugh Dickins int expand_upwards(struct vm_area_struct *vma, unsigned long address) 25511da177e4SLinus Torvalds { 255209357814SOleg Nesterov struct mm_struct *mm = vma->vm_mm; 25531be7107fSHugh Dickins struct vm_area_struct *next; 25541be7107fSHugh Dickins unsigned long gap_addr; 255512352d3cSKonstantin Khlebnikov int error = 0; 2556d4af56c5SLiam R. Howlett MA_STATE(mas, &mm->mm_mt, 0, 0); 25571da177e4SLinus Torvalds 2558d4af56c5SLiam R. Howlett validate_mm_mt(mm); 25591da177e4SLinus Torvalds if (!(vma->vm_flags & VM_GROWSUP)) 25601da177e4SLinus Torvalds return -EFAULT; 25611da177e4SLinus Torvalds 2562bd726c90SHelge Deller /* Guard against exceeding limits of the address space. */ 25631be7107fSHugh Dickins address &= PAGE_MASK; 256437511fb5SHelge Deller if (address >= (TASK_SIZE & PAGE_MASK)) 256512352d3cSKonstantin Khlebnikov return -ENOMEM; 2566bd726c90SHelge Deller address += PAGE_SIZE; 256712352d3cSKonstantin Khlebnikov 25681be7107fSHugh Dickins /* Enforce stack_guard_gap */ 25691be7107fSHugh Dickins gap_addr = address + stack_guard_gap; 2570bd726c90SHelge Deller 2571bd726c90SHelge Deller /* Guard against overflow */ 2572bd726c90SHelge Deller if (gap_addr < address || gap_addr > TASK_SIZE) 2573bd726c90SHelge Deller gap_addr = TASK_SIZE; 2574bd726c90SHelge Deller 25751be7107fSHugh Dickins next = vma->vm_next; 25763122e80eSAnshuman Khandual if (next && next->vm_start < gap_addr && vma_is_accessible(next)) { 25771be7107fSHugh Dickins if (!(next->vm_flags & VM_GROWSUP)) 25781be7107fSHugh Dickins return -ENOMEM; 25791be7107fSHugh Dickins /* Check that both stack segments have the same anon_vma? */ 25801be7107fSHugh Dickins } 25811be7107fSHugh Dickins 2582d4af56c5SLiam R. Howlett if (mas_preallocate(&mas, vma, GFP_KERNEL)) 25831da177e4SLinus Torvalds return -ENOMEM; 25841da177e4SLinus Torvalds 2585d4af56c5SLiam R. Howlett /* We must make sure the anon_vma is allocated. */ 2586d4af56c5SLiam R. Howlett if (unlikely(anon_vma_prepare(vma))) { 2587d4af56c5SLiam R. Howlett mas_destroy(&mas); 2588d4af56c5SLiam R. Howlett return -ENOMEM; 2589d4af56c5SLiam R. Howlett } 2590d4af56c5SLiam R. Howlett 25911da177e4SLinus Torvalds /* 25921da177e4SLinus Torvalds * vma->vm_start/vm_end cannot change under us because the caller 2593c1e8d7c6SMichel Lespinasse * is required to hold the mmap_lock in read mode. We need the 25941da177e4SLinus Torvalds * anon_vma lock to serialize against concurrent expand_stacks. 
25951da177e4SLinus Torvalds */ 259612352d3cSKonstantin Khlebnikov anon_vma_lock_write(vma->anon_vma); 25971da177e4SLinus Torvalds 25981da177e4SLinus Torvalds /* Somebody else might have raced and expanded it already */ 25991da177e4SLinus Torvalds if (address > vma->vm_end) { 26001da177e4SLinus Torvalds unsigned long size, grow; 26011da177e4SLinus Torvalds 26021da177e4SLinus Torvalds size = address - vma->vm_start; 26031da177e4SLinus Torvalds grow = (address - vma->vm_end) >> PAGE_SHIFT; 26041da177e4SLinus Torvalds 260542c36f63SHugh Dickins error = -ENOMEM; 260642c36f63SHugh Dickins if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { 26071da177e4SLinus Torvalds error = acct_stack_growth(vma, size, grow); 26083af9e859SEric B Munson if (!error) { 26094128997bSMichel Lespinasse /* 26104128997bSMichel Lespinasse * vma_gap_update() doesn't support concurrent 2611c1e8d7c6SMichel Lespinasse * updates, but we only hold a shared mmap_lock 26124128997bSMichel Lespinasse * lock here, so we need to protect against 26134128997bSMichel Lespinasse * concurrent vma expansions. 261412352d3cSKonstantin Khlebnikov * anon_vma_lock_write() doesn't help here, as 26154128997bSMichel Lespinasse * we don't guarantee that all growable vmas 26164128997bSMichel Lespinasse * in a mm share the same root anon vma. 26174128997bSMichel Lespinasse * So, we reuse mm->page_table_lock to guard 26184128997bSMichel Lespinasse * against concurrent vma expansions. 26194128997bSMichel Lespinasse */ 262009357814SOleg Nesterov spin_lock(&mm->page_table_lock); 262187e8827bSOleg Nesterov if (vma->vm_flags & VM_LOCKED) 262209357814SOleg Nesterov mm->locked_vm += grow; 262384638335SKonstantin Khlebnikov vm_stat_account(mm, vma->vm_flags, grow); 2624bf181b9fSMichel Lespinasse anon_vma_interval_tree_pre_update_vma(vma); 26251da177e4SLinus Torvalds vma->vm_end = address; 2626d4af56c5SLiam R. Howlett /* Overwrite old entry in mtree. */ 2627d4af56c5SLiam R. Howlett vma_mas_store(vma, &mas); 2628bf181b9fSMichel Lespinasse anon_vma_interval_tree_post_update_vma(vma); 2629d3737187SMichel Lespinasse if (vma->vm_next) 2630d3737187SMichel Lespinasse vma_gap_update(vma->vm_next); 2631d3737187SMichel Lespinasse else 26321be7107fSHugh Dickins mm->highest_vm_end = vm_end_gap(vma); 263309357814SOleg Nesterov spin_unlock(&mm->page_table_lock); 26344128997bSMichel Lespinasse 26353af9e859SEric B Munson perf_event_mmap(vma); 26363af9e859SEric B Munson } 26371da177e4SLinus Torvalds } 263842c36f63SHugh Dickins } 263912352d3cSKonstantin Khlebnikov anon_vma_unlock_write(vma->anon_vma); 2640c791576cSYang Shi khugepaged_enter_vma(vma, vma->vm_flags); 264109357814SOleg Nesterov validate_mm(mm); 2642d4af56c5SLiam R. Howlett validate_mm_mt(mm); 2643d4af56c5SLiam R. Howlett mas_destroy(&mas); 26441da177e4SLinus Torvalds return error; 26451da177e4SLinus Torvalds } 264646dea3d0SHugh Dickins #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */ 264746dea3d0SHugh Dickins 26481da177e4SLinus Torvalds /* 26491da177e4SLinus Torvalds * vma is the first one with address < vma->vm_start. Have to extend vma. 26501da177e4SLinus Torvalds */ 2651d05f3169SMichal Hocko int expand_downwards(struct vm_area_struct *vma, 2652b6a2fea3SOllie Wild unsigned long address) 26531da177e4SLinus Torvalds { 265409357814SOleg Nesterov struct mm_struct *mm = vma->vm_mm; 26551be7107fSHugh Dickins struct vm_area_struct *prev; 26560a1d5299SJann Horn int error = 0; 2657d4af56c5SLiam R. Howlett MA_STATE(mas, &mm->mm_mt, 0, 0); 26581da177e4SLinus Torvalds 2659d4af56c5SLiam R. 
Howlett validate_mm(mm); 26608869477aSEric Paris address &= PAGE_MASK; 26610a1d5299SJann Horn if (address < mmap_min_addr) 26620a1d5299SJann Horn return -EPERM; 26638869477aSEric Paris 26641be7107fSHugh Dickins /* Enforce stack_guard_gap */ 26651be7107fSHugh Dickins prev = vma->vm_prev; 26661be7107fSHugh Dickins /* Check that both stack segments have the same anon_vma? */ 266732e4e6d5SOleg Nesterov if (prev && !(prev->vm_flags & VM_GROWSDOWN) && 26683122e80eSAnshuman Khandual vma_is_accessible(prev)) { 266932e4e6d5SOleg Nesterov if (address - prev->vm_end < stack_guard_gap) 267032e4e6d5SOleg Nesterov return -ENOMEM; 26711be7107fSHugh Dickins } 26721be7107fSHugh Dickins 2673d4af56c5SLiam R. Howlett if (mas_preallocate(&mas, vma, GFP_KERNEL)) 267412352d3cSKonstantin Khlebnikov return -ENOMEM; 26751da177e4SLinus Torvalds 2676d4af56c5SLiam R. Howlett /* We must make sure the anon_vma is allocated. */ 2677d4af56c5SLiam R. Howlett if (unlikely(anon_vma_prepare(vma))) { 2678d4af56c5SLiam R. Howlett mas_destroy(&mas); 2679d4af56c5SLiam R. Howlett return -ENOMEM; 2680d4af56c5SLiam R. Howlett } 2681d4af56c5SLiam R. Howlett 26821da177e4SLinus Torvalds /* 26831da177e4SLinus Torvalds * vma->vm_start/vm_end cannot change under us because the caller 2684c1e8d7c6SMichel Lespinasse * is required to hold the mmap_lock in read mode. We need the 26851da177e4SLinus Torvalds * anon_vma lock to serialize against concurrent expand_stacks. 26861da177e4SLinus Torvalds */ 268712352d3cSKonstantin Khlebnikov anon_vma_lock_write(vma->anon_vma); 26881da177e4SLinus Torvalds 26891da177e4SLinus Torvalds /* Somebody else might have raced and expanded it already */ 26901da177e4SLinus Torvalds if (address < vma->vm_start) { 26911da177e4SLinus Torvalds unsigned long size, grow; 26921da177e4SLinus Torvalds 26931da177e4SLinus Torvalds size = vma->vm_end - address; 26941da177e4SLinus Torvalds grow = (vma->vm_start - address) >> PAGE_SHIFT; 26951da177e4SLinus Torvalds 2696a626ca6aSLinus Torvalds error = -ENOMEM; 2697a626ca6aSLinus Torvalds if (grow <= vma->vm_pgoff) { 26981da177e4SLinus Torvalds error = acct_stack_growth(vma, size, grow); 26991da177e4SLinus Torvalds if (!error) { 27004128997bSMichel Lespinasse /* 27014128997bSMichel Lespinasse * vma_gap_update() doesn't support concurrent 2702c1e8d7c6SMichel Lespinasse * updates, but we only hold a shared mmap_lock 27034128997bSMichel Lespinasse * lock here, so we need to protect against 27044128997bSMichel Lespinasse * concurrent vma expansions. 270512352d3cSKonstantin Khlebnikov * anon_vma_lock_write() doesn't help here, as 27064128997bSMichel Lespinasse * we don't guarantee that all growable vmas 27074128997bSMichel Lespinasse * in a mm share the same root anon vma. 27084128997bSMichel Lespinasse * So, we reuse mm->page_table_lock to guard 27094128997bSMichel Lespinasse * against concurrent vma expansions. 27104128997bSMichel Lespinasse */ 271109357814SOleg Nesterov spin_lock(&mm->page_table_lock); 271287e8827bSOleg Nesterov if (vma->vm_flags & VM_LOCKED) 271309357814SOleg Nesterov mm->locked_vm += grow; 271484638335SKonstantin Khlebnikov vm_stat_account(mm, vma->vm_flags, grow); 2715bf181b9fSMichel Lespinasse anon_vma_interval_tree_pre_update_vma(vma); 27161da177e4SLinus Torvalds vma->vm_start = address; 27171da177e4SLinus Torvalds vma->vm_pgoff -= grow; 2718d4af56c5SLiam R. Howlett /* Overwrite old entry in mtree. */ 2719d4af56c5SLiam R. 
Howlett vma_mas_store(vma, &mas); 2720bf181b9fSMichel Lespinasse anon_vma_interval_tree_post_update_vma(vma); 2721d3737187SMichel Lespinasse vma_gap_update(vma); 272209357814SOleg Nesterov spin_unlock(&mm->page_table_lock); 27234128997bSMichel Lespinasse 27243af9e859SEric B Munson perf_event_mmap(vma); 27251da177e4SLinus Torvalds } 27261da177e4SLinus Torvalds } 2727a626ca6aSLinus Torvalds } 272812352d3cSKonstantin Khlebnikov anon_vma_unlock_write(vma->anon_vma); 2729c791576cSYang Shi khugepaged_enter_vma(vma, vma->vm_flags); 273009357814SOleg Nesterov validate_mm(mm); 2731d4af56c5SLiam R. Howlett mas_destroy(&mas); 27321da177e4SLinus Torvalds return error; 27331da177e4SLinus Torvalds } 27341da177e4SLinus Torvalds 27351be7107fSHugh Dickins /* enforced gap between the expanding stack and other mappings. */ 27361be7107fSHugh Dickins unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; 27371be7107fSHugh Dickins 27381be7107fSHugh Dickins static int __init cmdline_parse_stack_guard_gap(char *p) 27391be7107fSHugh Dickins { 27401be7107fSHugh Dickins unsigned long val; 27411be7107fSHugh Dickins char *endptr; 27421be7107fSHugh Dickins 27431be7107fSHugh Dickins val = simple_strtoul(p, &endptr, 10); 27441be7107fSHugh Dickins if (!*endptr) 27451be7107fSHugh Dickins stack_guard_gap = val << PAGE_SHIFT; 27461be7107fSHugh Dickins 2747e6d09493SRandy Dunlap return 1; 27481be7107fSHugh Dickins } 27491be7107fSHugh Dickins __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); 27501be7107fSHugh Dickins 2751b6a2fea3SOllie Wild #ifdef CONFIG_STACK_GROWSUP 2752b6a2fea3SOllie Wild int expand_stack(struct vm_area_struct *vma, unsigned long address) 2753b6a2fea3SOllie Wild { 2754b6a2fea3SOllie Wild return expand_upwards(vma, address); 2755b6a2fea3SOllie Wild } 2756b6a2fea3SOllie Wild 2757b6a2fea3SOllie Wild struct vm_area_struct * 2758b6a2fea3SOllie Wild find_extend_vma(struct mm_struct *mm, unsigned long addr) 2759b6a2fea3SOllie Wild { 2760b6a2fea3SOllie Wild struct vm_area_struct *vma, *prev; 2761b6a2fea3SOllie Wild 2762b6a2fea3SOllie Wild addr &= PAGE_MASK; 2763b6a2fea3SOllie Wild vma = find_vma_prev(mm, addr, &prev); 2764b6a2fea3SOllie Wild if (vma && (vma->vm_start <= addr)) 2765b6a2fea3SOllie Wild return vma; 27664d45e75aSJann Horn if (!prev || expand_stack(prev, addr)) 2767b6a2fea3SOllie Wild return NULL; 2768cea10a19SMichel Lespinasse if (prev->vm_flags & VM_LOCKED) 2769fc05f566SKirill A. 
Shutemov populate_vma_page_range(prev, addr, prev->vm_end, NULL); 2770b6a2fea3SOllie Wild return prev; 2771b6a2fea3SOllie Wild } 2772b6a2fea3SOllie Wild #else 2773b6a2fea3SOllie Wild int expand_stack(struct vm_area_struct *vma, unsigned long address) 2774b6a2fea3SOllie Wild { 2775b6a2fea3SOllie Wild return expand_downwards(vma, address); 2776b6a2fea3SOllie Wild } 2777b6a2fea3SOllie Wild 27781da177e4SLinus Torvalds struct vm_area_struct * 27791da177e4SLinus Torvalds find_extend_vma(struct mm_struct *mm, unsigned long addr) 27801da177e4SLinus Torvalds { 27811da177e4SLinus Torvalds struct vm_area_struct *vma; 27821da177e4SLinus Torvalds unsigned long start; 27831da177e4SLinus Torvalds 27841da177e4SLinus Torvalds addr &= PAGE_MASK; 27851da177e4SLinus Torvalds vma = find_vma(mm, addr); 27861da177e4SLinus Torvalds if (!vma) 27871da177e4SLinus Torvalds return NULL; 27881da177e4SLinus Torvalds if (vma->vm_start <= addr) 27891da177e4SLinus Torvalds return vma; 27901da177e4SLinus Torvalds if (!(vma->vm_flags & VM_GROWSDOWN)) 27911da177e4SLinus Torvalds return NULL; 27921da177e4SLinus Torvalds start = vma->vm_start; 27931da177e4SLinus Torvalds if (expand_stack(vma, addr)) 27941da177e4SLinus Torvalds return NULL; 2795cea10a19SMichel Lespinasse if (vma->vm_flags & VM_LOCKED) 2796fc05f566SKirill A. Shutemov populate_vma_page_range(vma, addr, start, NULL); 27971da177e4SLinus Torvalds return vma; 27981da177e4SLinus Torvalds } 27991da177e4SLinus Torvalds #endif 28001da177e4SLinus Torvalds 2801e1d6d01aSJesse Barnes EXPORT_SYMBOL_GPL(find_extend_vma); 2802e1d6d01aSJesse Barnes 28032c0b3814SHugh Dickins /* 28042c0b3814SHugh Dickins * Ok - we have the memory areas we should free on the vma list, 28052c0b3814SHugh Dickins * so release them, and do the vma updates. 28061da177e4SLinus Torvalds * 28072c0b3814SHugh Dickins * Called with the mm semaphore held. 28081da177e4SLinus Torvalds */ 28092c0b3814SHugh Dickins static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) 28101da177e4SLinus Torvalds { 28114f74d2c8SLinus Torvalds unsigned long nr_accounted = 0; 28124f74d2c8SLinus Torvalds 2813365e9c87SHugh Dickins /* Update high watermark before we lower total_vm */ 2814365e9c87SHugh Dickins update_hiwater_vm(mm); 28152c0b3814SHugh Dickins do { 2816ab50b8edSHugh Dickins long nrpages = vma_pages(vma); 28171da177e4SLinus Torvalds 28184f74d2c8SLinus Torvalds if (vma->vm_flags & VM_ACCOUNT) 28194f74d2c8SLinus Torvalds nr_accounted += nrpages; 282084638335SKonstantin Khlebnikov vm_stat_account(mm, vma->vm_flags, -nrpages); 2821a8fb5618SHugh Dickins vma = remove_vma(vma); 2822146425a3SHugh Dickins } while (vma); 28234f74d2c8SLinus Torvalds vm_unacct_memory(nr_accounted); 28241da177e4SLinus Torvalds validate_mm(mm); 28251da177e4SLinus Torvalds } 28261da177e4SLinus Torvalds 28271da177e4SLinus Torvalds /* 28281da177e4SLinus Torvalds * Get rid of page table information in the indicated region. 28291da177e4SLinus Torvalds * 2830f10df686SPaolo 'Blaisorblade' Giarrusso * Called with the mm semaphore held. 
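 * In this file it is reached from __do_munmap(), after the VMAs to be
 * removed have been split and detached from the mm's VMA list.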
28311da177e4SLinus Torvalds */ 28321da177e4SLinus Torvalds static void unmap_region(struct mm_struct *mm, 2833e0da382cSHugh Dickins struct vm_area_struct *vma, struct vm_area_struct *prev, 2834e0da382cSHugh Dickins unsigned long start, unsigned long end) 28351da177e4SLinus Torvalds { 2836f39af059SMatthew Wilcox (Oracle) struct vm_area_struct *next = __vma_next(mm, prev); 2837d16dfc55SPeter Zijlstra struct mmu_gather tlb; 28381da177e4SLinus Torvalds 28391da177e4SLinus Torvalds lru_add_drain(); 2840a72afd87SWill Deacon tlb_gather_mmu(&tlb, mm); 2841365e9c87SHugh Dickins update_hiwater_rss(mm); 28424f74d2c8SLinus Torvalds unmap_vmas(&tlb, vma, start, end); 2843d16dfc55SPeter Zijlstra free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, 28446ee8630eSHugh Dickins next ? next->vm_start : USER_PGTABLES_CEILING); 2845ae8eba8bSWill Deacon tlb_finish_mmu(&tlb); 28461da177e4SLinus Torvalds } 28471da177e4SLinus Torvalds 28481da177e4SLinus Torvalds /* 28491da177e4SLinus Torvalds * Create a list of vma's touched by the unmap, removing them from the mm's 28501da177e4SLinus Torvalds * vma list as we go.. 28511da177e4SLinus Torvalds */ 2852246c320aSKirill A. Shutemov static bool 2853d4af56c5SLiam R. Howlett detach_vmas_to_be_unmapped(struct mm_struct *mm, struct ma_state *mas, 2854d4af56c5SLiam R. Howlett struct vm_area_struct *vma, struct vm_area_struct *prev, 2855d4af56c5SLiam R. Howlett unsigned long end) 28561da177e4SLinus Torvalds { 28571da177e4SLinus Torvalds struct vm_area_struct **insertion_point; 28581da177e4SLinus Torvalds struct vm_area_struct *tail_vma = NULL; 28591da177e4SLinus Torvalds 28601da177e4SLinus Torvalds insertion_point = (prev ? &prev->vm_next : &mm->mmap); 2861297c5eeeSLinus Torvalds vma->vm_prev = NULL; 2862d4af56c5SLiam R. Howlett mas_set_range(mas, vma->vm_start, end - 1); 2863d4af56c5SLiam R. Howlett mas_store_prealloc(mas, NULL); 28641da177e4SLinus Torvalds do { 2865d3737187SMichel Lespinasse vma_rb_erase(vma, &mm->mm_rb); 2866a213e5cfSHugh Dickins if (vma->vm_flags & VM_LOCKED) 2867a213e5cfSHugh Dickins mm->locked_vm -= vma_pages(vma); 28681da177e4SLinus Torvalds mm->map_count--; 28691da177e4SLinus Torvalds tail_vma = vma; 28701da177e4SLinus Torvalds vma = vma->vm_next; 28711da177e4SLinus Torvalds } while (vma && vma->vm_start < end); 28721da177e4SLinus Torvalds *insertion_point = vma; 2873d3737187SMichel Lespinasse if (vma) { 2874297c5eeeSLinus Torvalds vma->vm_prev = prev; 2875d3737187SMichel Lespinasse vma_gap_update(vma); 2876d3737187SMichel Lespinasse } else 28771be7107fSHugh Dickins mm->highest_vm_end = prev ? vm_end_gap(prev) : 0; 28781da177e4SLinus Torvalds tail_vma->vm_next = NULL; 2879615d6e87SDavidlohr Bueso 2880615d6e87SDavidlohr Bueso /* Kill the cache */ 2881615d6e87SDavidlohr Bueso vmacache_invalidate(mm); 2882246c320aSKirill A. Shutemov 2883246c320aSKirill A. Shutemov /* 2884246c320aSKirill A. Shutemov * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or 2885246c320aSKirill A. Shutemov * VM_GROWSUP VMA. Such VMAs can change their size under 2886246c320aSKirill A. Shutemov * down_read(mmap_lock) and collide with the VMA we are about to unmap. 2887246c320aSKirill A. Shutemov */ 2888246c320aSKirill A. Shutemov if (vma && (vma->vm_flags & VM_GROWSDOWN)) 2889246c320aSKirill A. Shutemov return false; 2890246c320aSKirill A. Shutemov if (prev && (prev->vm_flags & VM_GROWSUP)) 2891246c320aSKirill A. Shutemov return false; 2892246c320aSKirill A. 
Shutemov return true; 28931da177e4SLinus Torvalds } 28941da177e4SLinus Torvalds 28951da177e4SLinus Torvalds /* 2896def5efe0SDavid Rientjes * __split_vma() bypasses sysctl_max_map_count checking. We use this where it 2897def5efe0SDavid Rientjes * has already been checked or doesn't make sense to fail. 28981da177e4SLinus Torvalds */ 2899def5efe0SDavid Rientjes int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, 29001da177e4SLinus Torvalds unsigned long addr, int new_below) 29011da177e4SLinus Torvalds { 29021da177e4SLinus Torvalds struct vm_area_struct *new; 2903e3975891SChen Gang int err; 2904d4af56c5SLiam R. Howlett validate_mm_mt(mm); 29051da177e4SLinus Torvalds 2906dd3b614fSDmitry Safonov if (vma->vm_ops && vma->vm_ops->may_split) { 2907dd3b614fSDmitry Safonov err = vma->vm_ops->may_split(vma, addr); 290831383c68SDan Williams if (err) 290931383c68SDan Williams return err; 291031383c68SDan Williams } 29111da177e4SLinus Torvalds 29123928d4f5SLinus Torvalds new = vm_area_dup(vma); 29131da177e4SLinus Torvalds if (!new) 2914e3975891SChen Gang return -ENOMEM; 29151da177e4SLinus Torvalds 29161da177e4SLinus Torvalds if (new_below) 29171da177e4SLinus Torvalds new->vm_end = addr; 29181da177e4SLinus Torvalds else { 29191da177e4SLinus Torvalds new->vm_start = addr; 29201da177e4SLinus Torvalds new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); 29211da177e4SLinus Torvalds } 29221da177e4SLinus Torvalds 2923ef0855d3SOleg Nesterov err = vma_dup_policy(vma, new); 2924ef0855d3SOleg Nesterov if (err) 29255beb4930SRik van Riel goto out_free_vma; 29261da177e4SLinus Torvalds 2927c4ea95d7SDaniel Forrest err = anon_vma_clone(new, vma); 2928c4ea95d7SDaniel Forrest if (err) 29295beb4930SRik van Riel goto out_free_mpol; 29305beb4930SRik van Riel 2931e9714acfSKonstantin Khlebnikov if (new->vm_file) 29321da177e4SLinus Torvalds get_file(new->vm_file); 29331da177e4SLinus Torvalds 29341da177e4SLinus Torvalds if (new->vm_ops && new->vm_ops->open) 29351da177e4SLinus Torvalds new->vm_ops->open(new); 29361da177e4SLinus Torvalds 29371da177e4SLinus Torvalds if (new_below) 29385beb4930SRik van Riel err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + 29391da177e4SLinus Torvalds ((addr - new->vm_start) >> PAGE_SHIFT), new); 29401da177e4SLinus Torvalds else 29415beb4930SRik van Riel err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); 29421da177e4SLinus Torvalds 29435beb4930SRik van Riel /* Success. */ 29445beb4930SRik van Riel if (!err) 29451da177e4SLinus Torvalds return 0; 29465beb4930SRik van Riel 2947d4af56c5SLiam R. Howlett /* Avoid vm accounting in close() operation */ 2948d4af56c5SLiam R. Howlett new->vm_start = new->vm_end; 2949d4af56c5SLiam R. Howlett new->vm_pgoff = 0; 29505beb4930SRik van Riel /* Clean everything up if vma_adjust failed. */ 295158927533SRik van Riel if (new->vm_ops && new->vm_ops->close) 29525beb4930SRik van Riel new->vm_ops->close(new); 2953e9714acfSKonstantin Khlebnikov if (new->vm_file) 29545beb4930SRik van Riel fput(new->vm_file); 29552aeadc30SAndrea Arcangeli unlink_anon_vmas(new); 29565beb4930SRik van Riel out_free_mpol: 2957ef0855d3SOleg Nesterov mpol_put(vma_policy(new)); 29585beb4930SRik van Riel out_free_vma: 29593928d4f5SLinus Torvalds vm_area_free(new); 2960d4af56c5SLiam R. 
Howlett validate_mm_mt(mm); 29615beb4930SRik van Riel return err; 29621da177e4SLinus Torvalds } 29631da177e4SLinus Torvalds 2964659ace58SKOSAKI Motohiro /* 2965659ace58SKOSAKI Motohiro * Split a vma into two pieces at address 'addr', a new vma is allocated 2966659ace58SKOSAKI Motohiro * either for the first part or the tail. 2967659ace58SKOSAKI Motohiro */ 2968659ace58SKOSAKI Motohiro int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, 2969659ace58SKOSAKI Motohiro unsigned long addr, int new_below) 2970659ace58SKOSAKI Motohiro { 2971659ace58SKOSAKI Motohiro if (mm->map_count >= sysctl_max_map_count) 2972659ace58SKOSAKI Motohiro return -ENOMEM; 2973659ace58SKOSAKI Motohiro 2974659ace58SKOSAKI Motohiro return __split_vma(mm, vma, addr, new_below); 2975659ace58SKOSAKI Motohiro } 2976659ace58SKOSAKI Motohiro 29771da177e4SLinus Torvalds /* Munmap is split into 2 main parts -- this part which finds 29781da177e4SLinus Torvalds * what needs doing, and the areas themselves, which do the 29791da177e4SLinus Torvalds * work. This now handles partial unmappings. 29801da177e4SLinus Torvalds * Jeremy Fitzhardinge <jeremy@goop.org> 29811da177e4SLinus Torvalds */ 298285a06835SYang Shi int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, 2983dd2283f2SYang Shi struct list_head *uf, bool downgrade) 29841da177e4SLinus Torvalds { 29851da177e4SLinus Torvalds unsigned long end; 2986146425a3SHugh Dickins struct vm_area_struct *vma, *prev, *last; 2987d4af56c5SLiam R. Howlett int error = -ENOMEM; 2988d4af56c5SLiam R. Howlett MA_STATE(mas, &mm->mm_mt, 0, 0); 29891da177e4SLinus Torvalds 2990de1741a1SAlexander Kuleshov if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start) 29911da177e4SLinus Torvalds return -EINVAL; 29921da177e4SLinus Torvalds 2993cc71aba3Svishnu.ps len = PAGE_ALIGN(len); 29945a28fc94SDave Hansen end = start + len; 2995cc71aba3Svishnu.ps if (len == 0) 29961da177e4SLinus Torvalds return -EINVAL; 29971da177e4SLinus Torvalds 29985a28fc94SDave Hansen /* 29995a28fc94SDave Hansen * arch_unmap() might do unmaps itself. It must be called 30005a28fc94SDave Hansen * and finish any rbtree manipulation before this code 30015a28fc94SDave Hansen * runs and also starts to manipulate the rbtree. 30025a28fc94SDave Hansen */ 30035a28fc94SDave Hansen arch_unmap(mm, start, end); 30045a28fc94SDave Hansen 300578d9cf60SGonzalo Matias Juarez Tello /* Find the first overlapping VMA where start < vma->vm_end */ 300678d9cf60SGonzalo Matias Juarez Tello vma = find_vma_intersection(mm, start, end); 3007146425a3SHugh Dickins if (!vma) 30081da177e4SLinus Torvalds return 0; 3009d4af56c5SLiam R. Howlett 3010d4af56c5SLiam R. Howlett if (mas_preallocate(&mas, vma, GFP_KERNEL)) 3011d4af56c5SLiam R. Howlett return -ENOMEM; 30129be34c9dSLinus Torvalds prev = vma->vm_prev; 30131da177e4SLinus Torvalds 30141da177e4SLinus Torvalds /* 30151da177e4SLinus Torvalds * If we need to split any vma, do it now to save pain later. 30161da177e4SLinus Torvalds * 30171da177e4SLinus Torvalds * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially 30181da177e4SLinus Torvalds * unmapped vm_area_struct will remain in use: so lower split_vma 30191da177e4SLinus Torvalds * places tmp vma above, and higher split_vma places tmp vma below. 
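 *
 * Worked example (addresses purely illustrative): unmapping
 * [0x3000, 0x5000) from a VMA spanning [0x2000, 0x6000) first splits
 * off the lower piece [0x2000, 0x3000), then the upper piece
 * [0x5000, 0x6000), so only the middle range is detached and freed.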
30201da177e4SLinus Torvalds */ 3021146425a3SHugh Dickins if (start > vma->vm_start) { 3022659ace58SKOSAKI Motohiro 3023659ace58SKOSAKI Motohiro /* 3024659ace58SKOSAKI Motohiro * Make sure that map_count on return from munmap() will 3025659ace58SKOSAKI Motohiro * not exceed its limit; but let map_count go just above 3026659ace58SKOSAKI Motohiro * its limit temporarily, to help free resources as expected. 3027659ace58SKOSAKI Motohiro */ 3028659ace58SKOSAKI Motohiro if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) 3029d4af56c5SLiam R. Howlett goto map_count_exceeded; 3030659ace58SKOSAKI Motohiro 3031659ace58SKOSAKI Motohiro error = __split_vma(mm, vma, start, 0); 30321da177e4SLinus Torvalds if (error) 3033d4af56c5SLiam R. Howlett goto split_failed; 3034146425a3SHugh Dickins prev = vma; 30351da177e4SLinus Torvalds } 30361da177e4SLinus Torvalds 30371da177e4SLinus Torvalds /* Does it split the last one? */ 30381da177e4SLinus Torvalds last = find_vma(mm, end); 30391da177e4SLinus Torvalds if (last && end > last->vm_start) { 3040d4af56c5SLiam R. Howlett error = __split_vma(mm, last, end, 1); 30411da177e4SLinus Torvalds if (error) 3042d4af56c5SLiam R. Howlett goto split_failed; 30431da177e4SLinus Torvalds } 3044f39af059SMatthew Wilcox (Oracle) vma = __vma_next(mm, prev); 30451da177e4SLinus Torvalds 30462376dd7cSAndrea Arcangeli if (unlikely(uf)) { 30472376dd7cSAndrea Arcangeli /* 30482376dd7cSAndrea Arcangeli * If userfaultfd_unmap_prep returns an error the vmas 3049f0953a1bSIngo Molnar * will remain split, but userland will get a 30502376dd7cSAndrea Arcangeli * highly unexpected error anyway. This is no 30512376dd7cSAndrea Arcangeli * different than the case where the first of the two 30522376dd7cSAndrea Arcangeli * __split_vma fails, but we don't undo the first 30532376dd7cSAndrea Arcangeli * split, despite we could. This is unlikely enough 30542376dd7cSAndrea Arcangeli * failure that it's not worth optimizing it for. 30552376dd7cSAndrea Arcangeli */ 3056d4af56c5SLiam R. Howlett error = userfaultfd_unmap_prep(vma, start, end, uf); 30572376dd7cSAndrea Arcangeli if (error) 3058d4af56c5SLiam R. Howlett goto userfaultfd_error; 30592376dd7cSAndrea Arcangeli } 30602376dd7cSAndrea Arcangeli 3061dd2283f2SYang Shi /* Detach vmas from rbtree */ 3062d4af56c5SLiam R. Howlett if (!detach_vmas_to_be_unmapped(mm, &mas, vma, prev, end)) 3063246c320aSKirill A. Shutemov downgrade = false; 30641da177e4SLinus Torvalds 3065dd2283f2SYang Shi if (downgrade) 3066d8ed45c5SMichel Lespinasse mmap_write_downgrade(mm); 3067dd2283f2SYang Shi 3068dd2283f2SYang Shi unmap_region(mm, vma, prev, start, end); 3069dd2283f2SYang Shi 30701da177e4SLinus Torvalds /* Fix up all other VM information */ 30712c0b3814SHugh Dickins remove_vma_list(mm, vma); 30721da177e4SLinus Torvalds 3073dd2283f2SYang Shi return downgrade ? 1 : 0; 3074d4af56c5SLiam R. Howlett 3075d4af56c5SLiam R. Howlett map_count_exceeded: 3076d4af56c5SLiam R. Howlett split_failed: 3077d4af56c5SLiam R. Howlett userfaultfd_error: 3078d4af56c5SLiam R. Howlett mas_destroy(&mas); 3079d4af56c5SLiam R. 
Howlett return error; 30801da177e4SLinus Torvalds } 30811da177e4SLinus Torvalds 3082dd2283f2SYang Shi int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, 3083dd2283f2SYang Shi struct list_head *uf) 3084dd2283f2SYang Shi { 3085dd2283f2SYang Shi return __do_munmap(mm, start, len, uf, false); 3086dd2283f2SYang Shi } 3087dd2283f2SYang Shi 3088dd2283f2SYang Shi static int __vm_munmap(unsigned long start, size_t len, bool downgrade) 3089a46ef99dSLinus Torvalds { 3090a46ef99dSLinus Torvalds int ret; 3091bfce281cSAl Viro struct mm_struct *mm = current->mm; 3092897ab3e0SMike Rapoport LIST_HEAD(uf); 3093a46ef99dSLinus Torvalds 3094d8ed45c5SMichel Lespinasse if (mmap_write_lock_killable(mm)) 3095ae798783SMichal Hocko return -EINTR; 3096ae798783SMichal Hocko 3097dd2283f2SYang Shi ret = __do_munmap(mm, start, len, &uf, downgrade); 3098dd2283f2SYang Shi /* 3099c1e8d7c6SMichel Lespinasse * Returning 1 indicates mmap_lock is downgraded. 3100dd2283f2SYang Shi * But 1 is not legal return value of vm_munmap() and munmap(), reset 3101dd2283f2SYang Shi * it to 0 before return. 3102dd2283f2SYang Shi */ 3103dd2283f2SYang Shi if (ret == 1) { 3104d8ed45c5SMichel Lespinasse mmap_read_unlock(mm); 3105dd2283f2SYang Shi ret = 0; 3106dd2283f2SYang Shi } else 3107d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 3108dd2283f2SYang Shi 3109897ab3e0SMike Rapoport userfaultfd_unmap_complete(mm, &uf); 3110a46ef99dSLinus Torvalds return ret; 3111a46ef99dSLinus Torvalds } 3112dd2283f2SYang Shi 3113dd2283f2SYang Shi int vm_munmap(unsigned long start, size_t len) 3114dd2283f2SYang Shi { 3115dd2283f2SYang Shi return __vm_munmap(start, len, false); 3116dd2283f2SYang Shi } 3117a46ef99dSLinus Torvalds EXPORT_SYMBOL(vm_munmap); 3118a46ef99dSLinus Torvalds 31196a6160a7SHeiko Carstens SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) 31201da177e4SLinus Torvalds { 3121ce18d171SCatalin Marinas addr = untagged_addr(addr); 3122dd2283f2SYang Shi return __vm_munmap(addr, len, true); 31231da177e4SLinus Torvalds } 31241da177e4SLinus Torvalds 3125c8d78c18SKirill A. Shutemov 3126c8d78c18SKirill A. Shutemov /* 3127c8d78c18SKirill A. Shutemov * Emulation of deprecated remap_file_pages() syscall. 3128c8d78c18SKirill A. Shutemov */ 3129c8d78c18SKirill A. Shutemov SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, 3130c8d78c18SKirill A. Shutemov unsigned long, prot, unsigned long, pgoff, unsigned long, flags) 3131c8d78c18SKirill A. Shutemov { 3132c8d78c18SKirill A. Shutemov 3133c8d78c18SKirill A. Shutemov struct mm_struct *mm = current->mm; 3134c8d78c18SKirill A. Shutemov struct vm_area_struct *vma; 3135c8d78c18SKirill A. Shutemov unsigned long populate = 0; 3136c8d78c18SKirill A. Shutemov unsigned long ret = -EINVAL; 3137c8d78c18SKirill A. Shutemov struct file *file; 3138c8d78c18SKirill A. Shutemov 3139ee65728eSMike Rapoport pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n", 3140c8d78c18SKirill A. Shutemov current->comm, current->pid); 3141c8d78c18SKirill A. Shutemov 3142c8d78c18SKirill A. Shutemov if (prot) 3143c8d78c18SKirill A. Shutemov return ret; 3144c8d78c18SKirill A. Shutemov start = start & PAGE_MASK; 3145c8d78c18SKirill A. Shutemov size = size & PAGE_MASK; 3146c8d78c18SKirill A. Shutemov 3147c8d78c18SKirill A. Shutemov if (start + size <= start) 3148c8d78c18SKirill A. Shutemov return ret; 3149c8d78c18SKirill A. Shutemov 3150c8d78c18SKirill A. Shutemov /* Does pgoff wrap? */ 3151c8d78c18SKirill A. 
Shutemov if (pgoff + (size >> PAGE_SHIFT) < pgoff) 3152c8d78c18SKirill A. Shutemov return ret; 3153c8d78c18SKirill A. Shutemov 3154d8ed45c5SMichel Lespinasse if (mmap_write_lock_killable(mm)) 3155dc0ef0dfSMichal Hocko return -EINTR; 3156dc0ef0dfSMichal Hocko 31579b593cb2SLiam R. Howlett vma = vma_lookup(mm, start); 3158c8d78c18SKirill A. Shutemov 3159c8d78c18SKirill A. Shutemov if (!vma || !(vma->vm_flags & VM_SHARED)) 3160c8d78c18SKirill A. Shutemov goto out; 3161c8d78c18SKirill A. Shutemov 316248f7df32SKirill A. Shutemov if (start + size > vma->vm_end) { 316348f7df32SKirill A. Shutemov struct vm_area_struct *next; 316448f7df32SKirill A. Shutemov 316548f7df32SKirill A. Shutemov for (next = vma->vm_next; next; next = next->vm_next) { 316648f7df32SKirill A. Shutemov /* hole between vmas ? */ 316748f7df32SKirill A. Shutemov if (next->vm_start != next->vm_prev->vm_end) 316848f7df32SKirill A. Shutemov goto out; 316948f7df32SKirill A. Shutemov 317048f7df32SKirill A. Shutemov if (next->vm_file != vma->vm_file) 317148f7df32SKirill A. Shutemov goto out; 317248f7df32SKirill A. Shutemov 317348f7df32SKirill A. Shutemov if (next->vm_flags != vma->vm_flags) 317448f7df32SKirill A. Shutemov goto out; 317548f7df32SKirill A. Shutemov 317648f7df32SKirill A. Shutemov if (start + size <= next->vm_end) 317748f7df32SKirill A. Shutemov break; 317848f7df32SKirill A. Shutemov } 317948f7df32SKirill A. Shutemov 318048f7df32SKirill A. Shutemov if (!next) 3181c8d78c18SKirill A. Shutemov goto out; 3182c8d78c18SKirill A. Shutemov } 3183c8d78c18SKirill A. Shutemov 3184c8d78c18SKirill A. Shutemov prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; 3185c8d78c18SKirill A. Shutemov prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; 3186c8d78c18SKirill A. Shutemov prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; 3187c8d78c18SKirill A. Shutemov 3188c8d78c18SKirill A. Shutemov flags &= MAP_NONBLOCK; 3189c8d78c18SKirill A. Shutemov flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; 3190fce000b1SLiam Howlett if (vma->vm_flags & VM_LOCKED) 3191c8d78c18SKirill A. Shutemov flags |= MAP_LOCKED; 319248f7df32SKirill A. Shutemov 3193c8d78c18SKirill A. Shutemov file = get_file(vma->vm_file); 319445e55300SPeter Collingbourne ret = do_mmap(vma->vm_file, start, size, 3195897ab3e0SMike Rapoport prot, flags, pgoff, &populate, NULL); 3196c8d78c18SKirill A. Shutemov fput(file); 3197c8d78c18SKirill A. Shutemov out: 3198d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 3199c8d78c18SKirill A. Shutemov if (populate) 3200c8d78c18SKirill A. Shutemov mm_populate(ret, populate); 3201c8d78c18SKirill A. Shutemov if (!IS_ERR_VALUE(ret)) 3202c8d78c18SKirill A. Shutemov ret = 0; 3203c8d78c18SKirill A. Shutemov return ret; 3204c8d78c18SKirill A. Shutemov } 3205c8d78c18SKirill A. Shutemov 32061da177e4SLinus Torvalds /* 32071da177e4SLinus Torvalds * this is really a simplified "do_mmap". it only handles 32081da177e4SLinus Torvalds * anonymous maps. eventually we may be able to do some 32091da177e4SLinus Torvalds * brk-specific accounting here. 
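 *
 * Reached via vm_brk_flags()/vm_brk() below; a binary loader growing
 * a zero-filled bss with vm_brk(addr, len) is a typical (illustrative)
 * caller.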
32101da177e4SLinus Torvalds */ 3211bb177a73SMichal Hocko static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf) 32121da177e4SLinus Torvalds { 32131da177e4SLinus Torvalds struct mm_struct *mm = current->mm; 32141da177e4SLinus Torvalds struct vm_area_struct *vma, *prev; 32151da177e4SLinus Torvalds struct rb_node **rb_link, *rb_parent; 32161da177e4SLinus Torvalds pgoff_t pgoff = addr >> PAGE_SHIFT; 32173a459756SKirill Korotaev int error; 3218ff68dac6SGaowei Pu unsigned long mapped_addr; 3219d4af56c5SLiam R. Howlett validate_mm_mt(mm); 32201da177e4SLinus Torvalds 322116e72e9bSDenys Vlasenko /* Until we need other flags, refuse anything except VM_EXEC. */ 322216e72e9bSDenys Vlasenko if ((flags & (~VM_EXEC)) != 0) 322316e72e9bSDenys Vlasenko return -EINVAL; 322416e72e9bSDenys Vlasenko flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; 32253a459756SKirill Korotaev 3226ff68dac6SGaowei Pu mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); 3227ff68dac6SGaowei Pu if (IS_ERR_VALUE(mapped_addr)) 3228ff68dac6SGaowei Pu return mapped_addr; 32293a459756SKirill Korotaev 3230363ee17fSDavidlohr Bueso error = mlock_future_check(mm, mm->def_flags, len); 3231363ee17fSDavidlohr Bueso if (error) 3232363ee17fSDavidlohr Bueso return error; 32331da177e4SLinus Torvalds 3234fb8090b6SLiam R. Howlett /* Clear old maps, set up prev, rb_link, rb_parent, and uf */ 3235fb8090b6SLiam R. Howlett if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf)) 32361da177e4SLinus Torvalds return -ENOMEM; 32371da177e4SLinus Torvalds 32381da177e4SLinus Torvalds /* Check against address space limits *after* clearing old maps... */ 323984638335SKonstantin Khlebnikov if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT)) 32401da177e4SLinus Torvalds return -ENOMEM; 32411da177e4SLinus Torvalds 32421da177e4SLinus Torvalds if (mm->map_count > sysctl_max_map_count) 32431da177e4SLinus Torvalds return -ENOMEM; 32441da177e4SLinus Torvalds 3245191c5424SAl Viro if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) 32461da177e4SLinus Torvalds return -ENOMEM; 32471da177e4SLinus Torvalds 32481da177e4SLinus Torvalds /* Can we just expand an old private anonymous mapping? */ 3249ba470de4SRik van Riel vma = vma_merge(mm, prev, addr, addr + len, flags, 32509a10064fSColin Cross NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX, NULL); 3251ba470de4SRik van Riel if (vma) 32521da177e4SLinus Torvalds goto out; 32531da177e4SLinus Torvalds 32541da177e4SLinus Torvalds /* 32551da177e4SLinus Torvalds * create a vma struct for an anonymous mapping 32561da177e4SLinus Torvalds */ 3257490fc053SLinus Torvalds vma = vm_area_alloc(mm); 32581da177e4SLinus Torvalds if (!vma) { 32591da177e4SLinus Torvalds vm_unacct_memory(len >> PAGE_SHIFT); 32601da177e4SLinus Torvalds return -ENOMEM; 32611da177e4SLinus Torvalds } 32621da177e4SLinus Torvalds 3263bfd40eafSKirill A. Shutemov vma_set_anonymous(vma); 32641da177e4SLinus Torvalds vma->vm_start = addr; 32651da177e4SLinus Torvalds vma->vm_end = addr + len; 32661da177e4SLinus Torvalds vma->vm_pgoff = pgoff; 32671da177e4SLinus Torvalds vma->vm_flags = flags; 32683ed75eb8SColy Li vma->vm_page_prot = vm_get_page_prot(flags); 3269d4af56c5SLiam R. Howlett if (vma_link(mm, vma, prev, rb_link, rb_parent)) 3270d4af56c5SLiam R. Howlett goto no_vma_link; 3271d4af56c5SLiam R. 
Howlett 32721da177e4SLinus Torvalds out: 32733af9e859SEric B Munson perf_event_mmap(vma); 32741da177e4SLinus Torvalds mm->total_vm += len >> PAGE_SHIFT; 327584638335SKonstantin Khlebnikov mm->data_vm += len >> PAGE_SHIFT; 3276128557ffSMichel Lespinasse if (flags & VM_LOCKED) 3277ba470de4SRik van Riel mm->locked_vm += (len >> PAGE_SHIFT); 3278d9104d1cSCyrill Gorcunov vma->vm_flags |= VM_SOFTDIRTY; 3279d4af56c5SLiam R. Howlett validate_mm_mt(mm); 32805d22fc25SLinus Torvalds return 0; 3281d4af56c5SLiam R. Howlett 3282d4af56c5SLiam R. Howlett no_vma_link: 3283d4af56c5SLiam R. Howlett vm_area_free(vma); 3284d4af56c5SLiam R. Howlett return -ENOMEM; 32851da177e4SLinus Torvalds } 32861da177e4SLinus Torvalds 3287bb177a73SMichal Hocko int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) 3288e4eb1ff6SLinus Torvalds { 3289e4eb1ff6SLinus Torvalds struct mm_struct *mm = current->mm; 3290bb177a73SMichal Hocko unsigned long len; 32915d22fc25SLinus Torvalds int ret; 3292128557ffSMichel Lespinasse bool populate; 3293897ab3e0SMike Rapoport LIST_HEAD(uf); 3294e4eb1ff6SLinus Torvalds 3295bb177a73SMichal Hocko len = PAGE_ALIGN(request); 3296bb177a73SMichal Hocko if (len < request) 3297bb177a73SMichal Hocko return -ENOMEM; 3298bb177a73SMichal Hocko if (!len) 3299bb177a73SMichal Hocko return 0; 3300bb177a73SMichal Hocko 3301d8ed45c5SMichel Lespinasse if (mmap_write_lock_killable(mm)) 33022d6c9282SMichal Hocko return -EINTR; 33032d6c9282SMichal Hocko 3304897ab3e0SMike Rapoport ret = do_brk_flags(addr, len, flags, &uf); 3305128557ffSMichel Lespinasse populate = ((mm->def_flags & VM_LOCKED) != 0); 3306d8ed45c5SMichel Lespinasse mmap_write_unlock(mm); 3307897ab3e0SMike Rapoport userfaultfd_unmap_complete(mm, &uf); 33085d22fc25SLinus Torvalds if (populate && !ret) 3309128557ffSMichel Lespinasse mm_populate(addr, len); 3310e4eb1ff6SLinus Torvalds return ret; 3311e4eb1ff6SLinus Torvalds } 331216e72e9bSDenys Vlasenko EXPORT_SYMBOL(vm_brk_flags); 331316e72e9bSDenys Vlasenko 331416e72e9bSDenys Vlasenko int vm_brk(unsigned long addr, unsigned long len) 331516e72e9bSDenys Vlasenko { 331616e72e9bSDenys Vlasenko return vm_brk_flags(addr, len, 0); 331716e72e9bSDenys Vlasenko } 3318e4eb1ff6SLinus Torvalds EXPORT_SYMBOL(vm_brk); 33191da177e4SLinus Torvalds 33201da177e4SLinus Torvalds /* Release all mmaps. */ 33211da177e4SLinus Torvalds void exit_mmap(struct mm_struct *mm) 33221da177e4SLinus Torvalds { 3323d16dfc55SPeter Zijlstra struct mmu_gather tlb; 3324ba470de4SRik van Riel struct vm_area_struct *vma; 33251da177e4SLinus Torvalds unsigned long nr_accounted = 0; 33261da177e4SLinus Torvalds 3327d6dd61c8SJeremy Fitzhardinge /* mm's last user has gone, and it's about to be pulled down */ 3328cddb8a5cSAndrea Arcangeli mmu_notifier_release(mm); 3329d6dd61c8SJeremy Fitzhardinge 333027ae357fSDavid Rientjes if (unlikely(mm_is_oom_victim(mm))) { 333127ae357fSDavid Rientjes /* 333227ae357fSDavid Rientjes * Manually reap the mm to free as much memory as possible. 333327ae357fSDavid Rientjes * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard 3334c1e8d7c6SMichel Lespinasse * this mm from further consideration. Taking mm->mmap_lock for 333527ae357fSDavid Rientjes * write after setting MMF_OOM_SKIP will guarantee that the oom 3336c1e8d7c6SMichel Lespinasse * reaper will not run on this mm again after mmap_lock is 333727ae357fSDavid Rientjes * dropped.
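 * (Sketch of the ordering relied on here, spelled out for clarity: the
 * oom reaper only touches an mm after mmap_read_trylock() succeeds and
 * while MMF_OOM_SKIP is still clear, so set_bit(MMF_OOM_SKIP) followed
 * by mmap_write_lock() below is what shuts it out for good.)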
333827ae357fSDavid Rientjes * 3339c1e8d7c6SMichel Lespinasse * Nothing can be holding mm->mmap_lock here and the above call 334027ae357fSDavid Rientjes * to mmu_notifier_release(mm) ensures mmu notifier callbacks in 334127ae357fSDavid Rientjes * __oom_reap_task_mm() will not block. 334227ae357fSDavid Rientjes */ 334393065ac7SMichal Hocko (void)__oom_reap_task_mm(mm); 334427ae357fSDavid Rientjes set_bit(MMF_OOM_SKIP, &mm->flags); 334527ae357fSDavid Rientjes } 334627ae357fSDavid Rientjes 334764591e86SSuren Baghdasaryan mmap_write_lock(mm); 33489480c53eSJeremy Fitzhardinge arch_exit_mmap(mm); 33499480c53eSJeremy Fitzhardinge 3350ba470de4SRik van Riel vma = mm->mmap; 335164591e86SSuren Baghdasaryan if (!vma) { 335264591e86SSuren Baghdasaryan /* Can happen if dup_mmap() received an OOM */ 335364591e86SSuren Baghdasaryan mmap_write_unlock(mm); 33549480c53eSJeremy Fitzhardinge return; 335564591e86SSuren Baghdasaryan } 33569480c53eSJeremy Fitzhardinge 33571da177e4SLinus Torvalds lru_add_drain(); 33581da177e4SLinus Torvalds flush_cache_mm(mm); 3359d8b45053SWill Deacon tlb_gather_mmu_fullmm(&tlb, mm); 3360901608d9SOleg Nesterov /* update_hiwater_rss(mm) here? but nobody should be looking */ 3361e0da382cSHugh Dickins /* Use -1 here to ensure all VMAs in the mm are unmapped */ 33624f74d2c8SLinus Torvalds unmap_vmas(&tlb, vma, 0, -1); 33636ee8630eSHugh Dickins free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); 3364ae8eba8bSWill Deacon tlb_finish_mmu(&tlb); 33651da177e4SLinus Torvalds 336664591e86SSuren Baghdasaryan /* Walk the list again, actually closing and freeing it. */ 33674f74d2c8SLinus Torvalds while (vma) { 33684f74d2c8SLinus Torvalds if (vma->vm_flags & VM_ACCOUNT) 33694f74d2c8SLinus Torvalds nr_accounted += vma_pages(vma); 3370a8fb5618SHugh Dickins vma = remove_vma(vma); 33710a3b3c25SPaul E. McKenney cond_resched(); 33724f74d2c8SLinus Torvalds } 3373d4af56c5SLiam R. Howlett 3374d4af56c5SLiam R. Howlett trace_exit_mmap(mm); 3375d4af56c5SLiam R. Howlett __mt_destroy(&mm->mm_mt); 3376f798a1d4SSuren Baghdasaryan mm->mmap = NULL; 337764591e86SSuren Baghdasaryan mmap_write_unlock(mm); 33784f74d2c8SLinus Torvalds vm_unacct_memory(nr_accounted); 33791da177e4SLinus Torvalds } 33801da177e4SLinus Torvalds 33811da177e4SLinus Torvalds /* Insert vm structure into process list sorted by address 33821da177e4SLinus Torvalds * and into the inode's i_mmap tree. If vm_file is non-NULL 3383c8c06efaSDavidlohr Bueso * then i_mmap_rwsem is taken here. 33841da177e4SLinus Torvalds */ 33851da177e4SLinus Torvalds int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) 33861da177e4SLinus Torvalds { 33876597d783SHugh Dickins struct vm_area_struct *prev; 33881da177e4SLinus Torvalds struct rb_node **rb_link, *rb_parent; 3389d4af56c5SLiam R. Howlett unsigned long start = vma->vm_start; 3390d4af56c5SLiam R. Howlett struct vm_area_struct *overlap = NULL; 3391d4af56c5SLiam R. Howlett unsigned long charged = vma_pages(vma); 33921da177e4SLinus Torvalds 3393c9d13f5fSChen Gang if (find_vma_links(mm, vma->vm_start, vma->vm_end, 3394c9d13f5fSChen Gang &prev, &rb_link, &rb_parent)) 3395d4af56c5SLiam R. Howlett return -ENOMEM; 3396d4af56c5SLiam R. Howlett if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) 3397c9d13f5fSChen Gang return -ENOMEM; 3398d4af56c5SLiam R. Howlett 3399d4af56c5SLiam R. Howlett overlap = mt_find(&mm->mm_mt, &start, vma->vm_end - 1); 3400d4af56c5SLiam R. Howlett if (overlap) { 3401d4af56c5SLiam R. Howlett 3402d4af56c5SLiam R.
Howlett pr_err("Found vma ending at %lu\n", start - 1); 3403d4af56c5SLiam R. Howlett pr_err("vma : %lu => %lu-%lu\n", (unsigned long)overlap, 3404d4af56c5SLiam R. Howlett overlap->vm_start, overlap->vm_end - 1); 3405d4af56c5SLiam R. Howlett #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) 3406d4af56c5SLiam R. Howlett mt_dump(&mm->mm_mt); 3407d4af56c5SLiam R. Howlett #endif 3408d4af56c5SLiam R. Howlett BUG(); 3409d4af56c5SLiam R. Howlett } 3410d4af56c5SLiam R. Howlett 3411c9d13f5fSChen Gang if ((vma->vm_flags & VM_ACCOUNT) && 3412d4af56c5SLiam R. Howlett security_vm_enough_memory_mm(mm, charged)) 3413c9d13f5fSChen Gang return -ENOMEM; 3414c9d13f5fSChen Gang 34151da177e4SLinus Torvalds /* 34161da177e4SLinus Torvalds * The vm_pgoff of a purely anonymous vma should be irrelevant 34171da177e4SLinus Torvalds * until its first write fault, when page's anon_vma and index 34181da177e4SLinus Torvalds * are set. But now set the vm_pgoff it will almost certainly 34191da177e4SLinus Torvalds * end up with (unless mremap moves it elsewhere before that 34201da177e4SLinus Torvalds * first wfault), so /proc/pid/maps tells a consistent story. 34211da177e4SLinus Torvalds * 34221da177e4SLinus Torvalds * By setting it to reflect the virtual start address of the 34231da177e4SLinus Torvalds * vma, merges and splits can happen in a seamless way, just 34241da177e4SLinus Torvalds * using the existing file pgoff checks and manipulations. 34258332326eSLiao Pingfang * Similarly in do_mmap and in do_brk_flags. 34261da177e4SLinus Torvalds */ 34278a9cc3b5SOleg Nesterov if (vma_is_anonymous(vma)) { 34281da177e4SLinus Torvalds BUG_ON(vma->anon_vma); 34291da177e4SLinus Torvalds vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; 34301da177e4SLinus Torvalds } 34312b144498SSrikar Dronamraju 3432d4af56c5SLiam R. Howlett if (vma_link(mm, vma, prev, rb_link, rb_parent)) { 3433d4af56c5SLiam R. Howlett vm_unacct_memory(charged); 3434d4af56c5SLiam R. Howlett return -ENOMEM; 3435d4af56c5SLiam R. Howlett } 3436d4af56c5SLiam R. Howlett 34371da177e4SLinus Torvalds return 0; 34381da177e4SLinus Torvalds } 34391da177e4SLinus Torvalds 34401da177e4SLinus Torvalds /* 34411da177e4SLinus Torvalds * Copy the vma structure to a new location in the same mm, 34421da177e4SLinus Torvalds * prior to moving page table entries, to effect an mremap move. 34431da177e4SLinus Torvalds */ 34441da177e4SLinus Torvalds struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, 344538a76013SMichel Lespinasse unsigned long addr, unsigned long len, pgoff_t pgoff, 344638a76013SMichel Lespinasse bool *need_rmap_locks) 34471da177e4SLinus Torvalds { 34481da177e4SLinus Torvalds struct vm_area_struct *vma = *vmap; 34491da177e4SLinus Torvalds unsigned long vma_start = vma->vm_start; 34501da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 34511da177e4SLinus Torvalds struct vm_area_struct *new_vma, *prev; 34521da177e4SLinus Torvalds struct rb_node **rb_link, *rb_parent; 3453948f017bSAndrea Arcangeli bool faulted_in_anon_vma = true; 3454d4af56c5SLiam R. Howlett unsigned long index = addr; 34551da177e4SLinus Torvalds 3456d4af56c5SLiam R. Howlett validate_mm_mt(mm); 34571da177e4SLinus Torvalds /* 34581da177e4SLinus Torvalds * If anonymous vma has not yet been faulted, update new pgoff 34591da177e4SLinus Torvalds * to match new location, to increase its chance of merging. 
34601da177e4SLinus Torvalds */ 3461ce75799bSOleg Nesterov if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { 34621da177e4SLinus Torvalds pgoff = addr >> PAGE_SHIFT; 3463948f017bSAndrea Arcangeli faulted_in_anon_vma = false; 3464948f017bSAndrea Arcangeli } 34651da177e4SLinus Torvalds 34666597d783SHugh Dickins if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) 34676597d783SHugh Dickins return NULL; /* should never get here */ 3468d4af56c5SLiam R. Howlett if (mt_find(&mm->mm_mt, &index, addr+len - 1)) 3469d4af56c5SLiam R. Howlett BUG(); 34701da177e4SLinus Torvalds new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, 347119a809afSAndrea Arcangeli vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), 34725c26f6acSSuren Baghdasaryan vma->vm_userfaultfd_ctx, anon_vma_name(vma)); 34731da177e4SLinus Torvalds if (new_vma) { 34741da177e4SLinus Torvalds /* 34751da177e4SLinus Torvalds * Source vma may have been merged into new_vma 34761da177e4SLinus Torvalds */ 3477948f017bSAndrea Arcangeli if (unlikely(vma_start >= new_vma->vm_start && 3478948f017bSAndrea Arcangeli vma_start < new_vma->vm_end)) { 3479948f017bSAndrea Arcangeli /* 3480948f017bSAndrea Arcangeli * The only way we can get a vma_merge with 3481948f017bSAndrea Arcangeli * self during an mremap is if the vma hasn't 3482948f017bSAndrea Arcangeli * been faulted in yet and we were allowed to 3483948f017bSAndrea Arcangeli * reset the dst vma->vm_pgoff to the 3484948f017bSAndrea Arcangeli * destination address of the mremap to allow 3485948f017bSAndrea Arcangeli * the merge to happen. mremap must change the 3486948f017bSAndrea Arcangeli * vm_pgoff linearity between src and dst vmas 3487948f017bSAndrea Arcangeli * (in turn preventing a vma_merge) to be 3488948f017bSAndrea Arcangeli * safe. It is only safe to keep the vm_pgoff 3489948f017bSAndrea Arcangeli * linear if there are no pages mapped yet. 3490948f017bSAndrea Arcangeli */ 349181d1b09cSSasha Levin VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma); 349238a76013SMichel Lespinasse *vmap = vma = new_vma; 3493108d6642SMichel Lespinasse } 349438a76013SMichel Lespinasse *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); 34951da177e4SLinus Torvalds } else { 34963928d4f5SLinus Torvalds new_vma = vm_area_dup(vma); 3497e3975891SChen Gang if (!new_vma) 3498e3975891SChen Gang goto out; 34991da177e4SLinus Torvalds new_vma->vm_start = addr; 35001da177e4SLinus Torvalds new_vma->vm_end = addr + len; 35011da177e4SLinus Torvalds new_vma->vm_pgoff = pgoff; 3502ef0855d3SOleg Nesterov if (vma_dup_policy(vma, new_vma)) 3503523d4e20SMichel Lespinasse goto out_free_vma; 3504523d4e20SMichel Lespinasse if (anon_vma_clone(new_vma, vma)) 3505523d4e20SMichel Lespinasse goto out_free_mempol; 3506e9714acfSKonstantin Khlebnikov if (new_vma->vm_file) 35071da177e4SLinus Torvalds get_file(new_vma->vm_file); 35081da177e4SLinus Torvalds if (new_vma->vm_ops && new_vma->vm_ops->open) 35091da177e4SLinus Torvalds new_vma->vm_ops->open(new_vma); 35101da177e4SLinus Torvalds vma_link(mm, new_vma, prev, rb_link, rb_parent); 351138a76013SMichel Lespinasse *need_rmap_locks = false; 35121da177e4SLinus Torvalds } 3513d4af56c5SLiam R. Howlett validate_mm_mt(mm); 35141da177e4SLinus Torvalds return new_vma; 35155beb4930SRik van Riel 35165beb4930SRik van Riel out_free_mempol: 3517ef0855d3SOleg Nesterov mpol_put(vma_policy(new_vma)); 35185beb4930SRik van Riel out_free_vma: 35193928d4f5SLinus Torvalds vm_area_free(new_vma); 3520e3975891SChen Gang out: 3521d4af56c5SLiam R. 
Howlett validate_mm_mt(mm); 35225beb4930SRik van Riel return NULL; 35231da177e4SLinus Torvalds } 3524119f657cSakpm@osdl.org 3525119f657cSakpm@osdl.org /* 3526119f657cSakpm@osdl.org * Return true if the calling process may expand its vm space by the passed 3527119f657cSakpm@osdl.org * number of pages 3528119f657cSakpm@osdl.org */ 352984638335SKonstantin Khlebnikov bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) 3530119f657cSakpm@osdl.org { 353184638335SKonstantin Khlebnikov if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) 353284638335SKonstantin Khlebnikov return false; 3533119f657cSakpm@osdl.org 3534d977d56cSKonstantin Khlebnikov if (is_data_mapping(flags) && 3535d977d56cSKonstantin Khlebnikov mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { 3536f4fcd558SKonstantin Khlebnikov /* Workaround for Valgrind */ 3537f4fcd558SKonstantin Khlebnikov if (rlimit(RLIMIT_DATA) == 0 && 3538f4fcd558SKonstantin Khlebnikov mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) 3539f4fcd558SKonstantin Khlebnikov return true; 354057a7702bSDavid Woodhouse 354157a7702bSDavid Woodhouse pr_warn_once("%s (%d): VmData %lu exceeds data ulimit %lu. Update limits%s.\n", 3542d977d56cSKonstantin Khlebnikov current->comm, current->pid, 3543d977d56cSKonstantin Khlebnikov (mm->data_vm + npages) << PAGE_SHIFT, 354457a7702bSDavid Woodhouse rlimit(RLIMIT_DATA), 354557a7702bSDavid Woodhouse ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data"); 354657a7702bSDavid Woodhouse 354757a7702bSDavid Woodhouse if (!ignore_rlimit_data) 3548d977d56cSKonstantin Khlebnikov return false; 3549d977d56cSKonstantin Khlebnikov } 3550119f657cSakpm@osdl.org 355184638335SKonstantin Khlebnikov return true; 355284638335SKonstantin Khlebnikov } 355384638335SKonstantin Khlebnikov 355484638335SKonstantin Khlebnikov void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) 355584638335SKonstantin Khlebnikov { 35567866076bSPeng Liu WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages); 355784638335SKonstantin Khlebnikov 3558d977d56cSKonstantin Khlebnikov if (is_exec_mapping(flags)) 355984638335SKonstantin Khlebnikov mm->exec_vm += npages; 3560d977d56cSKonstantin Khlebnikov else if (is_stack_mapping(flags)) 356184638335SKonstantin Khlebnikov mm->stack_vm += npages; 3562d977d56cSKonstantin Khlebnikov else if (is_data_mapping(flags)) 356384638335SKonstantin Khlebnikov mm->data_vm += npages; 3564119f657cSakpm@osdl.org } 3565fa5dc22fSRoland McGrath 3566b3ec9f33SSouptick Joarder static vm_fault_t special_mapping_fault(struct vm_fault *vmf); 3567a62c34bdSAndy Lutomirski 3568a62c34bdSAndy Lutomirski /* 3569a62c34bdSAndy Lutomirski * Having a close hook prevents vma merging regardless of flags.
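 * (Specifically, is_mergeable_vma() rejects any vma whose vm_ops has a
 * ->close method, since merging could change what that hook observes;
 * an empty close hook is therefore enough to pin the vma's identity.)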
3570a62c34bdSAndy Lutomirski */ 3571a62c34bdSAndy Lutomirski static void special_mapping_close(struct vm_area_struct *vma) 3572a62c34bdSAndy Lutomirski { 3573a62c34bdSAndy Lutomirski } 3574a62c34bdSAndy Lutomirski 3575a62c34bdSAndy Lutomirski static const char *special_mapping_name(struct vm_area_struct *vma) 3576a62c34bdSAndy Lutomirski { 3577a62c34bdSAndy Lutomirski return ((struct vm_special_mapping *)vma->vm_private_data)->name; 3578a62c34bdSAndy Lutomirski } 3579a62c34bdSAndy Lutomirski 358014d07113SBrian Geffon static int special_mapping_mremap(struct vm_area_struct *new_vma) 3581b059a453SDmitry Safonov { 3582b059a453SDmitry Safonov struct vm_special_mapping *sm = new_vma->vm_private_data; 3583b059a453SDmitry Safonov 3584280e87e9SDmitry Safonov if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) 3585280e87e9SDmitry Safonov return -EFAULT; 3586280e87e9SDmitry Safonov 3587b059a453SDmitry Safonov if (sm->mremap) 3588b059a453SDmitry Safonov return sm->mremap(sm, new_vma); 3589280e87e9SDmitry Safonov 3590b059a453SDmitry Safonov return 0; 3591b059a453SDmitry Safonov } 3592b059a453SDmitry Safonov 3593871402e0SDmitry Safonov static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr) 3594871402e0SDmitry Safonov { 3595871402e0SDmitry Safonov /* 3596871402e0SDmitry Safonov * Forbid splitting special mappings - the kernel has expectations about 3597871402e0SDmitry Safonov * the number of pages in the mapping. Together with VM_DONTEXPAND 3598871402e0SDmitry Safonov * the size of the vma should stay the same over the special mapping's 3599871402e0SDmitry Safonov * lifetime. 3600871402e0SDmitry Safonov */ 3601871402e0SDmitry Safonov return -EINVAL; 3602871402e0SDmitry Safonov } 3603871402e0SDmitry Safonov 3604a62c34bdSAndy Lutomirski static const struct vm_operations_struct special_mapping_vmops = { 3605a62c34bdSAndy Lutomirski .close = special_mapping_close, 3606a62c34bdSAndy Lutomirski .fault = special_mapping_fault, 3607b059a453SDmitry Safonov .mremap = special_mapping_mremap, 3608a62c34bdSAndy Lutomirski .name = special_mapping_name, 3609af34ebebSDmitry Safonov /* vDSO code relies on VVAR not being accessible remotely */ 3610af34ebebSDmitry Safonov .access = NULL, 3611871402e0SDmitry Safonov .may_split = special_mapping_split, 3612a62c34bdSAndy Lutomirski }; 3613a62c34bdSAndy Lutomirski 3614a62c34bdSAndy Lutomirski static const struct vm_operations_struct legacy_special_mapping_vmops = { 3615a62c34bdSAndy Lutomirski .close = special_mapping_close, 3616a62c34bdSAndy Lutomirski .fault = special_mapping_fault, 3617a62c34bdSAndy Lutomirski }; 3618fa5dc22fSRoland McGrath 3619b3ec9f33SSouptick Joarder static vm_fault_t special_mapping_fault(struct vm_fault *vmf) 3620fa5dc22fSRoland McGrath { 362111bac800SDave Jiang struct vm_area_struct *vma = vmf->vma; 3622b1d0e4f5SNick Piggin pgoff_t pgoff; 3623fa5dc22fSRoland McGrath struct page **pages; 3624fa5dc22fSRoland McGrath 3625f872f540SAndy Lutomirski if (vma->vm_ops == &legacy_special_mapping_vmops) { 3626a62c34bdSAndy Lutomirski pages = vma->vm_private_data; 3627f872f540SAndy Lutomirski } else { 3628f872f540SAndy Lutomirski struct vm_special_mapping *sm = vma->vm_private_data; 3629f872f540SAndy Lutomirski 3630f872f540SAndy Lutomirski if (sm->fault) 363111bac800SDave Jiang return sm->fault(sm, vmf->vma, vmf); 3632f872f540SAndy Lutomirski 3633f872f540SAndy Lutomirski pages = sm->pages; 3634f872f540SAndy Lutomirski } 3635a62c34bdSAndy Lutomirski 36368a9cc3b5SOleg Nesterov for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) 3637b1d0e4f5SNick Piggin
pgoff--; 3638fa5dc22fSRoland McGrath 3639fa5dc22fSRoland McGrath if (*pages) { 3640fa5dc22fSRoland McGrath struct page *page = *pages; 3641fa5dc22fSRoland McGrath get_page(page); 3642b1d0e4f5SNick Piggin vmf->page = page; 3643b1d0e4f5SNick Piggin return 0; 3644fa5dc22fSRoland McGrath } 3645fa5dc22fSRoland McGrath 3646b1d0e4f5SNick Piggin return VM_FAULT_SIGBUS; 3647fa5dc22fSRoland McGrath } 3648fa5dc22fSRoland McGrath 3649a62c34bdSAndy Lutomirski static struct vm_area_struct *__install_special_mapping( 3650a62c34bdSAndy Lutomirski struct mm_struct *mm, 3651fa5dc22fSRoland McGrath unsigned long addr, unsigned long len, 365227f28b97SChen Gang unsigned long vm_flags, void *priv, 365327f28b97SChen Gang const struct vm_operations_struct *ops) 3654fa5dc22fSRoland McGrath { 3655462e635eSTavis Ormandy int ret; 3656fa5dc22fSRoland McGrath struct vm_area_struct *vma; 3657fa5dc22fSRoland McGrath 3658d4af56c5SLiam R. Howlett validate_mm_mt(mm); 3659490fc053SLinus Torvalds vma = vm_area_alloc(mm); 3660fa5dc22fSRoland McGrath if (unlikely(vma == NULL)) 36613935ed6aSStefani Seibold return ERR_PTR(-ENOMEM); 3662fa5dc22fSRoland McGrath 3663fa5dc22fSRoland McGrath vma->vm_start = addr; 3664fa5dc22fSRoland McGrath vma->vm_end = addr + len; 3665fa5dc22fSRoland McGrath 3666d9104d1cSCyrill Gorcunov vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; 36671fc09228SHugh Dickins vma->vm_flags &= VM_LOCKED_CLEAR_MASK; 36683ed75eb8SColy Li vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 3669fa5dc22fSRoland McGrath 3670a62c34bdSAndy Lutomirski vma->vm_ops = ops; 3671a62c34bdSAndy Lutomirski vma->vm_private_data = priv; 3672fa5dc22fSRoland McGrath 3673462e635eSTavis Ormandy ret = insert_vm_struct(mm, vma); 3674462e635eSTavis Ormandy if (ret) 3675462e635eSTavis Ormandy goto out; 3676fa5dc22fSRoland McGrath 367784638335SKonstantin Khlebnikov vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); 3678fa5dc22fSRoland McGrath 3679cdd6c482SIngo Molnar perf_event_mmap(vma); 3680089dd79dSPeter Zijlstra 3681d4af56c5SLiam R. Howlett validate_mm_mt(mm); 36823935ed6aSStefani Seibold return vma; 3683462e635eSTavis Ormandy 3684462e635eSTavis Ormandy out: 36853928d4f5SLinus Torvalds vm_area_free(vma); 3686d4af56c5SLiam R. Howlett validate_mm_mt(mm); 36873935ed6aSStefani Seibold return ERR_PTR(ret); 36883935ed6aSStefani Seibold } 36893935ed6aSStefani Seibold 36902eefd878SDmitry Safonov bool vma_is_special_mapping(const struct vm_area_struct *vma, 36912eefd878SDmitry Safonov const struct vm_special_mapping *sm) 36922eefd878SDmitry Safonov { 36932eefd878SDmitry Safonov return vma->vm_private_data == sm && 36942eefd878SDmitry Safonov (vma->vm_ops == &special_mapping_vmops || 36952eefd878SDmitry Safonov vma->vm_ops == &legacy_special_mapping_vmops); 36962eefd878SDmitry Safonov } 36972eefd878SDmitry Safonov 3698a62c34bdSAndy Lutomirski /* 3699c1e8d7c6SMichel Lespinasse * Called with mm->mmap_lock held for writing. 3700a62c34bdSAndy Lutomirski * Insert a new vma covering the given region, with the given flags. 3701a62c34bdSAndy Lutomirski * Its pages are supplied by the given array of struct page *. 3702a62c34bdSAndy Lutomirski * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. 3703a62c34bdSAndy Lutomirski * The region past the last page supplied will always produce SIGBUS. 3704a62c34bdSAndy Lutomirski * The array pointer and the pages it points to are assumed to stay alive 3705a62c34bdSAndy Lutomirski * for as long as this mapping might exist. 
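 *
 * A usage sketch with invented names (the in-tree users are the vDSO
 * and VVAR mappings):
 *
 *	static struct page *demo_pages[] = { page0, page1, NULL };
 *	static const struct vm_special_mapping demo_spec = {
 *		.name	= "[demo]",
 *		.pages	= demo_pages,
 *	};
 *	vma = _install_special_mapping(mm, addr, 2 * PAGE_SIZE,
 *				       VM_READ | VM_MAYREAD, &demo_spec);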
3706a62c34bdSAndy Lutomirski */ 3707a62c34bdSAndy Lutomirski struct vm_area_struct *_install_special_mapping( 3708a62c34bdSAndy Lutomirski struct mm_struct *mm, 3709a62c34bdSAndy Lutomirski unsigned long addr, unsigned long len, 3710a62c34bdSAndy Lutomirski unsigned long vm_flags, const struct vm_special_mapping *spec) 3711a62c34bdSAndy Lutomirski { 371227f28b97SChen Gang return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, 371327f28b97SChen Gang &special_mapping_vmops); 3714a62c34bdSAndy Lutomirski } 3715a62c34bdSAndy Lutomirski 37163935ed6aSStefani Seibold int install_special_mapping(struct mm_struct *mm, 37173935ed6aSStefani Seibold unsigned long addr, unsigned long len, 37183935ed6aSStefani Seibold unsigned long vm_flags, struct page **pages) 37193935ed6aSStefani Seibold { 3720a62c34bdSAndy Lutomirski struct vm_area_struct *vma = __install_special_mapping( 372127f28b97SChen Gang mm, addr, len, vm_flags, (void *)pages, 372227f28b97SChen Gang &legacy_special_mapping_vmops); 37233935ed6aSStefani Seibold 372414bd5b45SDuan Jiong return PTR_ERR_OR_ZERO(vma); 3725fa5dc22fSRoland McGrath } 37267906d00cSAndrea Arcangeli 37277906d00cSAndrea Arcangeli static DEFINE_MUTEX(mm_all_locks_mutex); 37287906d00cSAndrea Arcangeli 3729454ed842SPeter Zijlstra static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) 37307906d00cSAndrea Arcangeli { 3731f808c13fSDavidlohr Bueso if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { 37327906d00cSAndrea Arcangeli /* 37337906d00cSAndrea Arcangeli * The LSB of head.next can't change from under us 37347906d00cSAndrea Arcangeli * because we hold the mm_all_locks_mutex. 37357906d00cSAndrea Arcangeli */ 3736da1c55f1SMichel Lespinasse down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock); 37377906d00cSAndrea Arcangeli /* 37387906d00cSAndrea Arcangeli * We can safely modify head.next after taking the 37395a505085SIngo Molnar * anon_vma->root->rwsem. If some other vma in this mm shares 37407906d00cSAndrea Arcangeli * the same anon_vma we won't take it again. 37417906d00cSAndrea Arcangeli * 37427906d00cSAndrea Arcangeli * No need of atomic instructions here, head.next 37437906d00cSAndrea Arcangeli * can't change from under us thanks to the 37445a505085SIngo Molnar * anon_vma->root->rwsem. 37457906d00cSAndrea Arcangeli */ 37467906d00cSAndrea Arcangeli if (__test_and_set_bit(0, (unsigned long *) 3747f808c13fSDavidlohr Bueso &anon_vma->root->rb_root.rb_root.rb_node)) 37487906d00cSAndrea Arcangeli BUG(); 37497906d00cSAndrea Arcangeli } 37507906d00cSAndrea Arcangeli } 37517906d00cSAndrea Arcangeli 3752454ed842SPeter Zijlstra static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) 37537906d00cSAndrea Arcangeli { 37547906d00cSAndrea Arcangeli if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { 37557906d00cSAndrea Arcangeli /* 37567906d00cSAndrea Arcangeli * AS_MM_ALL_LOCKS can't change from under us because 37577906d00cSAndrea Arcangeli * we hold the mm_all_locks_mutex. 37587906d00cSAndrea Arcangeli * 37597906d00cSAndrea Arcangeli * Operations on ->flags have to be atomic because 37607906d00cSAndrea Arcangeli * even if AS_MM_ALL_LOCKS is stable thanks to the 37617906d00cSAndrea Arcangeli * mm_all_locks_mutex, there may be other cpus 37627906d00cSAndrea Arcangeli * changing other bitflags in parallel to us. 
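 * (Contrast with vm_lock_anon_vma() above: there the non-atomic
 * __test_and_set_bit() suffices because the rb_root pointer is only
 * touched under anon_vma->root->rwsem, while mapping->flags is shared
 * with unrelated atomic bit users, hence test_and_set_bit() here.)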
37637906d00cSAndrea Arcangeli */ 37647906d00cSAndrea Arcangeli if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) 37657906d00cSAndrea Arcangeli BUG(); 3766da1c55f1SMichel Lespinasse down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock); 37677906d00cSAndrea Arcangeli } 37687906d00cSAndrea Arcangeli } 37697906d00cSAndrea Arcangeli 37707906d00cSAndrea Arcangeli /* 37717906d00cSAndrea Arcangeli * This operation locks against the VM for all pte/vma/mm related 37727906d00cSAndrea Arcangeli * operations that could ever happen on a certain mm. This includes 37737906d00cSAndrea Arcangeli * vmtruncate, try_to_unmap, and all page faults. 37747906d00cSAndrea Arcangeli * 3775c1e8d7c6SMichel Lespinasse * The caller must take the mmap_lock in write mode before calling 37767906d00cSAndrea Arcangeli * mm_take_all_locks(). The caller isn't allowed to release the 3777c1e8d7c6SMichel Lespinasse * mmap_lock until mm_drop_all_locks() returns. 37787906d00cSAndrea Arcangeli * 3779c1e8d7c6SMichel Lespinasse * mmap_lock in write mode is required in order to block all operations 37807906d00cSAndrea Arcangeli * that could modify pagetables and free pages without needing to 378127ba0644SKirill A. Shutemov * alter the vma layout. It's also needed in write mode to avoid new 37827906d00cSAndrea Arcangeli * anon_vmas being associated with existing vmas. 37837906d00cSAndrea Arcangeli * 37847906d00cSAndrea Arcangeli * A single task can't take more than one mm_take_all_locks() in a row 37857906d00cSAndrea Arcangeli * or it would deadlock. 37867906d00cSAndrea Arcangeli * 3787bf181b9fSMichel Lespinasse * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in 37887906d00cSAndrea Arcangeli * mapping->flags avoid taking the same lock twice, if more than one 37897906d00cSAndrea Arcangeli * vma in this mm is backed by the same anon_vma or address_space. 37907906d00cSAndrea Arcangeli * 379188f306b6SKirill A. Shutemov * We take locks in the following order, according to the comment at 379288f306b6SKirill A. Shutemov * the beginning of mm/rmap.c: 379388f306b6SKirill A. Shutemov * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for 379488f306b6SKirill A. Shutemov * hugetlb mapping); 379588f306b6SKirill A. Shutemov * - all i_mmap_rwsem locks; 379688f306b6SKirill A. Shutemov * - all anon_vma->rwsem locks. 379788f306b6SKirill A. Shutemov * 379888f306b6SKirill A. Shutemov * We can take all locks within these types randomly because the VM code 379988f306b6SKirill A. Shutemov * doesn't nest them and we are protected from parallel mm_take_all_locks() by 380088f306b6SKirill A. Shutemov * mm_all_locks_mutex. 38017906d00cSAndrea Arcangeli * 38027906d00cSAndrea Arcangeli * mm_take_all_locks() and mm_drop_all_locks() are expensive operations 38037906d00cSAndrea Arcangeli * that may have to take thousands of locks. 38047906d00cSAndrea Arcangeli * 38057906d00cSAndrea Arcangeli * mm_take_all_locks() can fail if it's interrupted by signals.
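 *
 * The expected calling pattern, sketched for illustration (this mirrors
 * the mmu_notifier registration path, the main in-tree user):
 *
 *	mmap_write_lock(mm);
 *	ret = mm_take_all_locks(mm);	/* 0 or -EINTR */
 *	if (!ret) {
 *		/* publish state that no fault or rmap walk may race with */
 *		mm_drop_all_locks(mm);
 *	}
 *	mmap_write_unlock(mm);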
38067906d00cSAndrea Arcangeli */ 38077906d00cSAndrea Arcangeli int mm_take_all_locks(struct mm_struct *mm) 38087906d00cSAndrea Arcangeli { 38097906d00cSAndrea Arcangeli struct vm_area_struct *vma; 38105beb4930SRik van Riel struct anon_vma_chain *avc; 38117906d00cSAndrea Arcangeli 3812325bca1fSRolf Eike Beer mmap_assert_write_locked(mm); 38137906d00cSAndrea Arcangeli 38147906d00cSAndrea Arcangeli mutex_lock(&mm_all_locks_mutex); 38157906d00cSAndrea Arcangeli 38167906d00cSAndrea Arcangeli for (vma = mm->mmap; vma; vma = vma->vm_next) { 38177906d00cSAndrea Arcangeli if (signal_pending(current)) 38187906d00cSAndrea Arcangeli goto out_unlock; 381988f306b6SKirill A. Shutemov if (vma->vm_file && vma->vm_file->f_mapping && 382088f306b6SKirill A. Shutemov is_vm_hugetlb_page(vma)) 382188f306b6SKirill A. Shutemov vm_lock_mapping(mm, vma->vm_file->f_mapping); 382288f306b6SKirill A. Shutemov } 382388f306b6SKirill A. Shutemov 382488f306b6SKirill A. Shutemov for (vma = mm->mmap; vma; vma = vma->vm_next) { 382588f306b6SKirill A. Shutemov if (signal_pending(current)) 382688f306b6SKirill A. Shutemov goto out_unlock; 382788f306b6SKirill A. Shutemov if (vma->vm_file && vma->vm_file->f_mapping && 382888f306b6SKirill A. Shutemov !is_vm_hugetlb_page(vma)) 3829454ed842SPeter Zijlstra vm_lock_mapping(mm, vma->vm_file->f_mapping); 38307906d00cSAndrea Arcangeli } 38317cd5a02fSPeter Zijlstra 38327cd5a02fSPeter Zijlstra for (vma = mm->mmap; vma; vma = vma->vm_next) { 38337cd5a02fSPeter Zijlstra if (signal_pending(current)) 38347cd5a02fSPeter Zijlstra goto out_unlock; 38357cd5a02fSPeter Zijlstra if (vma->anon_vma) 38365beb4930SRik van Riel list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 38375beb4930SRik van Riel vm_lock_anon_vma(mm, avc->anon_vma); 38387cd5a02fSPeter Zijlstra } 38397cd5a02fSPeter Zijlstra 3840584cff54SKautuk Consul return 0; 38417906d00cSAndrea Arcangeli 38427906d00cSAndrea Arcangeli out_unlock: 38437906d00cSAndrea Arcangeli mm_drop_all_locks(mm); 3844584cff54SKautuk Consul return -EINTR; 38457906d00cSAndrea Arcangeli } 38467906d00cSAndrea Arcangeli 38477906d00cSAndrea Arcangeli static void vm_unlock_anon_vma(struct anon_vma *anon_vma) 38487906d00cSAndrea Arcangeli { 3849f808c13fSDavidlohr Bueso if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { 38507906d00cSAndrea Arcangeli /* 38517906d00cSAndrea Arcangeli * The LSB of head.next can't change to 0 from under 38527906d00cSAndrea Arcangeli * us because we hold the mm_all_locks_mutex. 38537906d00cSAndrea Arcangeli * 38547906d00cSAndrea Arcangeli * We must however clear the bitflag before unlocking 3855bf181b9fSMichel Lespinasse * the vma so the users using the anon_vma->rb_root will 38567906d00cSAndrea Arcangeli * never see our bitflag. 38577906d00cSAndrea Arcangeli * 38587906d00cSAndrea Arcangeli * No need of atomic instructions here, head.next 38597906d00cSAndrea Arcangeli * can't change from under us until we release the 38605a505085SIngo Molnar * anon_vma->root->rwsem. 
38617906d00cSAndrea Arcangeli */ 38627906d00cSAndrea Arcangeli if (!__test_and_clear_bit(0, (unsigned long *) 3863f808c13fSDavidlohr Bueso &anon_vma->root->rb_root.rb_root.rb_node)) 38647906d00cSAndrea Arcangeli BUG(); 386508b52706SKonstantin Khlebnikov anon_vma_unlock_write(anon_vma); 38667906d00cSAndrea Arcangeli } 38677906d00cSAndrea Arcangeli } 38687906d00cSAndrea Arcangeli 38697906d00cSAndrea Arcangeli static void vm_unlock_mapping(struct address_space *mapping) 38707906d00cSAndrea Arcangeli { 38717906d00cSAndrea Arcangeli if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { 38727906d00cSAndrea Arcangeli /* 38737906d00cSAndrea Arcangeli * AS_MM_ALL_LOCKS can't change to 0 from under us 38747906d00cSAndrea Arcangeli * because we hold the mm_all_locks_mutex. 38757906d00cSAndrea Arcangeli */ 387683cde9e8SDavidlohr Bueso i_mmap_unlock_write(mapping); 38777906d00cSAndrea Arcangeli if (!test_and_clear_bit(AS_MM_ALL_LOCKS, 38787906d00cSAndrea Arcangeli &mapping->flags)) 38797906d00cSAndrea Arcangeli BUG(); 38807906d00cSAndrea Arcangeli } 38817906d00cSAndrea Arcangeli } 38827906d00cSAndrea Arcangeli 38837906d00cSAndrea Arcangeli /* 3884c1e8d7c6SMichel Lespinasse * The mmap_lock cannot be released by the caller until 38857906d00cSAndrea Arcangeli * mm_drop_all_locks() returns. 38867906d00cSAndrea Arcangeli */ 38877906d00cSAndrea Arcangeli void mm_drop_all_locks(struct mm_struct *mm) 38887906d00cSAndrea Arcangeli { 38897906d00cSAndrea Arcangeli struct vm_area_struct *vma; 38905beb4930SRik van Riel struct anon_vma_chain *avc; 38917906d00cSAndrea Arcangeli 3892325bca1fSRolf Eike Beer mmap_assert_write_locked(mm); 38937906d00cSAndrea Arcangeli BUG_ON(!mutex_is_locked(&mm_all_locks_mutex)); 38947906d00cSAndrea Arcangeli 38957906d00cSAndrea Arcangeli for (vma = mm->mmap; vma; vma = vma->vm_next) { 38967906d00cSAndrea Arcangeli if (vma->anon_vma) 38975beb4930SRik van Riel list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 38985beb4930SRik van Riel vm_unlock_anon_vma(avc->anon_vma); 38997906d00cSAndrea Arcangeli if (vma->vm_file && vma->vm_file->f_mapping) 39007906d00cSAndrea Arcangeli vm_unlock_mapping(vma->vm_file->f_mapping); 39017906d00cSAndrea Arcangeli } 39027906d00cSAndrea Arcangeli 39037906d00cSAndrea Arcangeli mutex_unlock(&mm_all_locks_mutex); 39047906d00cSAndrea Arcangeli } 39058feae131SDavid Howells 39068feae131SDavid Howells /* 39073edf41d8Sseokhoon.yoon * initialise the percpu counter for VM 39088feae131SDavid Howells */ 39098feae131SDavid Howells void __init mmap_init(void) 39108feae131SDavid Howells { 391100a62ce9SKOSAKI Motohiro int ret; 391200a62ce9SKOSAKI Motohiro 3913908c7f19STejun Heo ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); 391400a62ce9SKOSAKI Motohiro VM_BUG_ON(ret); 39158feae131SDavid Howells } 3916c9b1d098SAndrew Shewmaker 3917c9b1d098SAndrew Shewmaker /* 3918c9b1d098SAndrew Shewmaker * Initialise sysctl_user_reserve_kbytes. 3919c9b1d098SAndrew Shewmaker * 3920c9b1d098SAndrew Shewmaker * This is intended to prevent a user from starting a single memory hogging 3921c9b1d098SAndrew Shewmaker * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER 3922c9b1d098SAndrew Shewmaker * mode. 3923c9b1d098SAndrew Shewmaker * 3924c9b1d098SAndrew Shewmaker * The default value is min(3% of free memory, 128MB) 3925c9b1d098SAndrew Shewmaker * 128MB is enough to recover with sshd/login, bash, and top/kill. 
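 *
 * The arithmetic below, spelled out: free_kbytes / 32 is roughly 3.1%
 * of free memory, and 1UL << 17 kilobytes is 131072 kB = 128MB, so the
 * min() implements the "min(3% of free memory, 128MB)" rule above.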
3926c9b1d098SAndrew Shewmaker */ 39271640879aSAndrew Shewmaker static int init_user_reserve(void) 3928c9b1d098SAndrew Shewmaker { 3929c9b1d098SAndrew Shewmaker unsigned long free_kbytes; 3930c9b1d098SAndrew Shewmaker 3931c41f012aSMichal Hocko free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); 3932c9b1d098SAndrew Shewmaker 3933c9b1d098SAndrew Shewmaker sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17); 3934c9b1d098SAndrew Shewmaker return 0; 3935c9b1d098SAndrew Shewmaker } 3936a64fb3cdSPaul Gortmaker subsys_initcall(init_user_reserve); 39374eeab4f5SAndrew Shewmaker 39384eeab4f5SAndrew Shewmaker /* 39394eeab4f5SAndrew Shewmaker * Initialise sysctl_admin_reserve_kbytes. 39404eeab4f5SAndrew Shewmaker * 39414eeab4f5SAndrew Shewmaker * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin 39424eeab4f5SAndrew Shewmaker * to log in and kill a memory hogging process. 39434eeab4f5SAndrew Shewmaker * 39444eeab4f5SAndrew Shewmaker * Systems with more than 256MB will reserve 8MB, enough to recover 39454eeab4f5SAndrew Shewmaker * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will 39464eeab4f5SAndrew Shewmaker * only reserve 3% of free pages by default. 39474eeab4f5SAndrew Shewmaker */ 39481640879aSAndrew Shewmaker static int init_admin_reserve(void) 39494eeab4f5SAndrew Shewmaker { 39504eeab4f5SAndrew Shewmaker unsigned long free_kbytes; 39514eeab4f5SAndrew Shewmaker 3952c41f012aSMichal Hocko free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); 39534eeab4f5SAndrew Shewmaker 39544eeab4f5SAndrew Shewmaker sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13); 39554eeab4f5SAndrew Shewmaker return 0; 39564eeab4f5SAndrew Shewmaker } 3957a64fb3cdSPaul Gortmaker subsys_initcall(init_admin_reserve); 39581640879aSAndrew Shewmaker 39591640879aSAndrew Shewmaker /* 39601640879aSAndrew Shewmaker * Reinititalise user and admin reserves if memory is added or removed. 39611640879aSAndrew Shewmaker * 39621640879aSAndrew Shewmaker * The default user reserve max is 128MB, and the default max for the 39631640879aSAndrew Shewmaker * admin reserve is 8MB. These are usually, but not always, enough to 39641640879aSAndrew Shewmaker * enable recovery from a memory hogging process using login/sshd, a shell, 39651640879aSAndrew Shewmaker * and tools like top. It may make sense to increase or even disable the 39661640879aSAndrew Shewmaker * reserve depending on the existence of swap or variations in the recovery 39671640879aSAndrew Shewmaker * tools. So, the admin may have changed them. 39681640879aSAndrew Shewmaker * 39691640879aSAndrew Shewmaker * If memory is added and the reserves have been eliminated or increased above 39701640879aSAndrew Shewmaker * the default max, then we'll trust the admin. 39711640879aSAndrew Shewmaker * 39721640879aSAndrew Shewmaker * If memory is removed and there isn't enough free memory, then we 39731640879aSAndrew Shewmaker * need to reset the reserves. 39741640879aSAndrew Shewmaker * 39751640879aSAndrew Shewmaker * Otherwise keep the reserve set by the admin. 
39761640879aSAndrew Shewmaker */ 39771640879aSAndrew Shewmaker static int reserve_mem_notifier(struct notifier_block *nb, 39781640879aSAndrew Shewmaker unsigned long action, void *data) 39791640879aSAndrew Shewmaker { 39801640879aSAndrew Shewmaker unsigned long tmp, free_kbytes; 39811640879aSAndrew Shewmaker 39821640879aSAndrew Shewmaker switch (action) { 39831640879aSAndrew Shewmaker case MEM_ONLINE: 39841640879aSAndrew Shewmaker /* Default max is 128MB. Leave alone if modified by operator. */ 39851640879aSAndrew Shewmaker tmp = sysctl_user_reserve_kbytes; 39861640879aSAndrew Shewmaker if (0 < tmp && tmp < (1UL << 17)) 39871640879aSAndrew Shewmaker init_user_reserve(); 39881640879aSAndrew Shewmaker 39891640879aSAndrew Shewmaker /* Default max is 8MB. Leave alone if modified by operator. */ 39901640879aSAndrew Shewmaker tmp = sysctl_admin_reserve_kbytes; 39911640879aSAndrew Shewmaker if (0 < tmp && tmp < (1UL << 13)) 39921640879aSAndrew Shewmaker init_admin_reserve(); 39931640879aSAndrew Shewmaker 39941640879aSAndrew Shewmaker break; 39951640879aSAndrew Shewmaker case MEM_OFFLINE: 3996c41f012aSMichal Hocko free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); 39971640879aSAndrew Shewmaker 39981640879aSAndrew Shewmaker if (sysctl_user_reserve_kbytes > free_kbytes) { 39991640879aSAndrew Shewmaker init_user_reserve(); 40001640879aSAndrew Shewmaker pr_info("vm.user_reserve_kbytes reset to %lu\n", 40011640879aSAndrew Shewmaker sysctl_user_reserve_kbytes); 40021640879aSAndrew Shewmaker } 40031640879aSAndrew Shewmaker 40041640879aSAndrew Shewmaker if (sysctl_admin_reserve_kbytes > free_kbytes) { 40051640879aSAndrew Shewmaker init_admin_reserve(); 40061640879aSAndrew Shewmaker pr_info("vm.admin_reserve_kbytes reset to %lu\n", 40071640879aSAndrew Shewmaker sysctl_admin_reserve_kbytes); 40081640879aSAndrew Shewmaker } 40091640879aSAndrew Shewmaker break; 40101640879aSAndrew Shewmaker default: 40111640879aSAndrew Shewmaker break; 40121640879aSAndrew Shewmaker } 40131640879aSAndrew Shewmaker return NOTIFY_OK; 40141640879aSAndrew Shewmaker } 40151640879aSAndrew Shewmaker 40161640879aSAndrew Shewmaker static struct notifier_block reserve_mem_nb = { 40171640879aSAndrew Shewmaker .notifier_call = reserve_mem_notifier, 40181640879aSAndrew Shewmaker }; 40191640879aSAndrew Shewmaker 40201640879aSAndrew Shewmaker static int __meminit init_reserve_notifier(void) 40211640879aSAndrew Shewmaker { 40221640879aSAndrew Shewmaker if (register_hotmemory_notifier(&reserve_mem_nb)) 4023b1de0d13SMitchel Humpherys pr_err("Failed registering memory add/remove notifier for admin reserve\n"); 40241640879aSAndrew Shewmaker 40251640879aSAndrew Shewmaker return 0; 40261640879aSAndrew Shewmaker } 4027a64fb3cdSPaul Gortmaker subsys_initcall(init_reserve_notifier); 4028