// SPDX-License-Identifier: GPL-2.0
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/mm-arch-hooks.h>
#include <linux/userfaultfd_k.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}
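
/*
 * Illustrative sketch, not part of this file: how userspace observes the
 * soft-dirty bit that move_soft_dirty_pte() sets. Per
 * Documentation/admin-guide/mm/soft-dirty.rst, the bit is cleared by
 * writing "4" to /proc/<pid>/clear_refs and read back as bit 55 of the
 * corresponding /proc/<pid>/pagemap entry (error handling omitted):
 *
 *	int fd = open("/proc/self/pagemap", O_RDONLY);
 *	uint64_t ent;
 *	pread(fd, &ent, sizeof(ent), (addr / getpagesize()) * 8);
 *	int soft_dirty = (ent >> 55) & 1;
 *
 * Because the moved ptes are re-marked soft-dirty here, checkpoint tools
 * such as CRIU do not lose track of pages whose address changed between
 * two dirty-tracking passes.
 */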

static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with page_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (need_rmap_locks)
		drop_rmap_locks(vma);
}

#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmd;

	if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK))
		return false;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd. But if we first
	 * have a few 4kB-only pages that get moved down, and then
	 * hit the "now the rest is PMD-aligned, let's do everything
	 * one pmd at a time", we will still have the old (now empty
	 * of any 4kB pages, but still there) PMD in the page table
	 * tree.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't move
	 * this pmd".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pmd */
	pmd = *old_pmd;
	pmd_clear(old_pmd);

	VM_BUG_ON(!pmd_none(*new_pmd));

	/* Set the new pmd */
	set_pmd_at(mm, new_addr, new_pmd, pmd);
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#endif
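
/*
 * Illustrative sketch, not part of this file: a move that can take the
 * move_normal_pmd() fast path above. Both the source and the destination
 * chosen by the kernel must be PMD-aligned (2MB with 4kB pages on x86-64)
 * and the extent must span a whole PMD, on a kernel with
 * CONFIG_HAVE_MOVE_PMD; otherwise move_ptes() migrates 512 individual
 * ptes instead. Plain mmap() does not guarantee 2MB alignment, so treat
 * this as a best case, not a promise:
 *
 *	size_t sz = 2UL << 20;				// one PMD's worth
 *	char *src = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	memset(src, 0xaa, sz);				// populate page tables
 *	char *dst = mremap(src, sz, sz, MREMAP_MAYMOVE);
 *
 * When the alignment works out, the whole pte page is re-linked under
 * new_pmd in one shot rather than copied entry by entry.
 */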

unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, next, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE) {
				bool moved;
				/* See comment in move_ptes() */
				if (need_rmap_locks)
					take_rmap_locks(vma);
				moved = move_huge_pmd(vma, old_addr, new_addr,
						      old_pmd, new_pmd);
				if (need_rmap_locks)
					drop_rmap_locks(vma);
				if (moved)
					continue;
			}
			split_huge_pmd(vma, old_pmd, old_addr);
			if (pmd_trans_unstable(old_pmd))
				continue;
		} else if (extent == PMD_SIZE) {
#ifdef CONFIG_HAVE_MOVE_PMD
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			bool moved;

			if (need_rmap_locks)
				take_rmap_locks(vma);
			moved = move_normal_pmd(vma, old_addr, new_addr,
						old_pmd, new_pmd);
			if (need_rmap_locks)
				drop_rmap_locks(vma);
			if (moved)
				continue;
#endif
		}

		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
			  new_pmd, new_addr, need_rmap_locks);
	}

	mmu_notifier_invalidate_range_end(&range);

	return len + old_addr - old_end;	/* how much done */
}

static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped. But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
			  MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since the page tables are still there,
		 * and then proceed to unmap the new area instead of the old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
		arch_remap(mm, old_addr, old_addr + old_len,
			   new_addr, new_addr + new_len);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell the pfn-tracking code that a pfnmap has moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_moved(vma);

	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
		if (vm_flags & VM_ACCOUNT) {
			/* Always put back VM_ACCOUNT since we won't unmap */
			vma->vm_flags |= VM_ACCOUNT;

			vm_acct_memory(new_len >> PAGE_SHIFT);
		}

		/*
		 * VMAs can actually be merged back together in copy_vma
		 * calling vma_merge. This can happen with anonymous vmas
		 * which have not yet been faulted, so if we were to consider
		 * this VMA split we'll end up adding VM_ACCOUNT on the
		 * next VMA, which is completely unrelated if this VMA
		 * was re-merged.
		 */
		if (split && new_vma == vma)
			split = 0;

		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
		vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

		/* Because we won't unmap we don't need to touch locked_vm */
		goto out;
	}

	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}
out:
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	return new_addr;
}
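
/*
 * Illustrative sketch, not part of this file: the MREMAP_DONTUNMAP path
 * handled at the end of move_vma(). The pages move to the new address
 * while the old range stays mapped but empty, so the next touch of the
 * old range faults like a fresh anonymous mapping; a userfaultfd
 * registration follows the pages via mremap_userfaultfd_prep(). The flag
 * requires old_len == new_len and a private anonymous vma (see
 * vma_to_resize() below):
 *
 *	void *new = mremap(old, len, len,
 *			   MREMAP_MAYMOVE | MREMAP_DONTUNMAP, NULL);
 *
 * Passing NULL here is a hint of "anywhere"; the kernel picks the new
 * location, and the old range is left behind as a placeholder rather
 * than being unmapped.
 */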

static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags,
	unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);
	unsigned long pgoff;

	if (!vma || vma->vm_start > addr)
		return ERR_PTR(-EFAULT);

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping.  This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original.  This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original.  There are no known use cases for this
	 * behavior.  As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap.  This is not supported.\n", current->comm, current->pid);
		return ERR_PTR(-EINVAL);
	}

	if (flags & MREMAP_DONTUNMAP && (!vma_is_anonymous(vma) ||
			vma->vm_flags & VM_SHARED))
		return ERR_PTR(-EINVAL);

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, vma->vm_flags,
			   (new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}
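
/*
 * Illustrative sketch, not part of this file: the RLIMIT_MEMLOCK check
 * in vma_to_resize() above. Growing a VM_LOCKED mapping charges the
 * delta against the limit, so without CAP_IPC_LOCK an unprivileged grow
 * can fail with EAGAIN (as also documented in mremap(2)):
 *
 *	mlock(p, len);
 *	void *q = mremap(p, len, 2 * len, MREMAP_MAYMOVE);
 *	if (q == MAP_FAILED && errno == EAGAIN)
 *		;	// over RLIMIT_MEMLOCK, nothing was remapped
 */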

static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		unsigned long flags, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags = 0;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	/*
	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmapped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the vma's after it gets -ENOMEM.
	 * So, to avoid such a scenario we can pre-compute whether the whole
	 * operation is likely to succeed map-wise.
	 * The worst-case scenario is when both vma's (new_addr and old_addr)
	 * get split in 3 before unmapping them.
	 * That means 2 more maps (1 for each) on top of the ones we already
	 * hold.
	 * Check whether the current map count plus 2 still leaves us 4 maps
	 * below the threshold, otherwise return -ENOMEM here to be more safe.
	 */
	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (flags & MREMAP_FIXED) {
		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
		if (ret)
			goto out;
	}

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (flags & MREMAP_DONTUNMAP &&
		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;

	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (IS_ERR_VALUE(ret))
		goto out1;

	/* We got a new mapping */
	if (!(flags & MREMAP_FIXED))
		new_addr = ret;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
		       uf_unmap);

	if (!(offset_in_page(ret)))
		goto out;

out1:
	vm_unacct_memory(charged);

out:
	return ret;
}
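
/*
 * Illustrative sketch, not part of this file: the MREMAP_FIXED path that
 * lands in mremap_to() above. The destination must be page-aligned and
 * must not overlap the source; anything already mapped at the target is
 * unmapped first (the do_munmap(..., uf_unmap_early) call). The address
 * below is hypothetical:
 *
 *	void *dst = (void *)0x700000000000UL;
 *	void *q = mremap(src, old_len, new_len,
 *			 MREMAP_MAYMOVE | MREMAP_FIXED, dst);
 */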

static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;
	bool downgraded = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arm64/tagged-address-abi.rst for more information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	/*
	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
	 * in the process.
	 */
	if (flags & MREMAP_DONTUNMAP &&
		(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, flags, &uf, &uf_unmap_early,
				&uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * __do_munmap does all the needed commit accounting, and
	 * downgrades mmap_lock to read if so directed.
	 */
	if (old_len >= new_len) {
		int retval;

		retval = __do_munmap(mm, addr+new_len, old_len - new_len,
				     &uf_unmap, true);
		if (retval < 0 && old_len != new_len) {
			ret = retval;
			goto out;
		/* Returning 1 indicates mmap_lock is downgraded to read. */
		} else if (retval == 1)
			downgraded = true;
		ret = addr;
		goto out;
	}
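
	/*
	 * Illustrative sketch, not part of this file: the shrinking case
	 * handled just above. No vma is created or moved; the tail is
	 * unmapped and the original address is returned:
	 *
	 *	void *p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
	 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	 *	void *q = mremap(p, 2 * 4096, 4096, 0);	// q == p
	 */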

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area.. */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					     vma->vm_pgoff +
					     ((addr - vma->vm_start) >> PAGE_SHIFT),
					     map_flags);
		if (IS_ERR_VALUE(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret)) {
		vm_unacct_memory(charged);
		locked = false;
	}
	if (downgraded)
		mmap_read_unlock(current->mm);
	else
		mmap_write_unlock(current->mm);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}
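
/*
 * Illustrative sketch, not part of this file, tying the paths above
 * together: a growing mremap() first tries vma_adjust() in place (when
 * the request ends exactly at vma->vm_end and vma_expandable() says the
 * next vma leaves room), and only falls back to move_vma() when
 * MREMAP_MAYMOVE is set:
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
 *	// q == p if the vma could grow in place, otherwise a new address
 */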