// SPDX-License-Identifier: GPL-2.0
/*
 *      mm/mremap.c
 *
 *      (C) Copyright 1996 Linus Torvalds
 *
 *      Address space accounting code   <alan@lxorguk.ukuu.org.uk>
 *      (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/mm-arch-hooks.h>
#include <linux/userfaultfd_k.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none_or_clear_bad(pgd))
                return NULL;

        p4d = p4d_offset(pgd, addr);
        if (p4d_none_or_clear_bad(p4d))
                return NULL;

        pud = pud_offset(p4d, addr);
        if (pud_none_or_clear_bad(pud))
                return NULL;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return NULL;

        return pmd;
}

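/*
 * Like get_old_pmd(), but for the destination address: allocate any
 * missing p4d/pud/pmd levels on the way down instead of bailing out.
 * Returns NULL only if a page-table allocation fails.
 */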
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
                            unsigned long addr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        p4d = p4d_alloc(mm, pgd, addr);
        if (!p4d)
                return NULL;
        pud = pud_alloc(mm, p4d, addr);
        if (!pud)
                return NULL;

        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return NULL;

        VM_BUG_ON(pmd_trans_huge(*pmd));

        return pmd;
}

static void take_rmap_locks(struct vm_area_struct *vma)
{
        if (vma->vm_file)
                i_mmap_lock_write(vma->vm_file->f_mapping);
        if (vma->anon_vma)
                anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
        if (vma->anon_vma)
                anon_vma_unlock_write(vma->anon_vma);
        if (vma->vm_file)
                i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
        /*
         * Set soft dirty bit so we can notice
         * in userspace the ptes were moved.
         */
#ifdef CONFIG_MEM_SOFT_DIRTY
        if (pte_present(pte))
                pte = pte_mksoft_dirty(pte);
        else if (is_swap_pte(pte))
                pte = pte_swp_mksoft_dirty(pte);
#endif
        return pte;
}

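/*
 * Move the ptes for a span that lies within a single pmd on both sides:
 * the caller (move_page_tables()) caps the extent so that the old range
 * maps through old_pmd and the destination maps through new_pmd.  The
 * TLB flush is deferred to the caller via *need_flush unless a dirty
 * pte forces an immediate flush (see the force_flush comment below).
 */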
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                unsigned long old_addr, unsigned long old_end,
                struct vm_area_struct *new_vma, pmd_t *new_pmd,
                unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t *old_pte, *new_pte, pte;
        spinlock_t *old_ptl, *new_ptl;
        bool force_flush = false;
        unsigned long len = old_end - old_addr;

        /*
         * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
         * locks to ensure that rmap will always observe either the old or the
         * new ptes. This is the easiest way to avoid races with
         * truncate_pagecache(), page migration, etc...
         *
         * When need_rmap_locks is false, we use other ways to avoid
         * such races:
         *
         * - During exec() shift_arg_pages(), we use a specially tagged vma
         *   which rmap call sites look for using is_vma_temporary_stack().
         *
         * - During mremap(), new_vma is often known to be placed after vma
         *   in rmap traversal order. This ensures rmap will always observe
         *   either the old pte, or the new pte, or both (the page table locks
         *   serialize access to individual ptes, but only rmap traversal
         *   order guarantees that we won't miss both the old and new ptes).
         */
        if (need_rmap_locks)
                take_rmap_locks(vma);

        /*
         * We don't have to worry about the ordering of src and dst
         * pte locks because exclusive mmap_sem prevents deadlock.
         */
        old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
        new_pte = pte_offset_map(new_pmd, new_addr);
        new_ptl = pte_lockptr(mm, new_pmd);
        if (new_ptl != old_ptl)
                spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
        flush_tlb_batched_pending(vma->vm_mm);
        arch_enter_lazy_mmu_mode();

        for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
                                   new_pte++, new_addr += PAGE_SIZE) {
                if (pte_none(*old_pte))
                        continue;

                pte = ptep_get_and_clear(mm, old_addr, old_pte);
                /*
                 * If we are remapping a dirty PTE, make sure
                 * to flush TLB before we drop the PTL for the
                 * old PTE or we may race with page_mkclean().
                 *
                 * This check has to be done after we removed the
                 * old PTE from page tables or another thread may
                 * dirty it after the check and before the removal.
                 */
                if (pte_present(pte) && pte_dirty(pte))
                        force_flush = true;
                pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
                pte = move_soft_dirty_pte(pte);
                set_pte_at(mm, new_addr, new_pte, pte);
        }

        arch_leave_lazy_mmu_mode();
        if (new_ptl != old_ptl)
                spin_unlock(new_ptl);
        pte_unmap(new_pte - 1);
        if (force_flush)
                flush_tlb_range(vma, old_end - len, old_end);
        else
                *need_flush = true;
        pte_unmap_unlock(old_pte - 1, old_ptl);
        if (need_rmap_locks)
                drop_rmap_locks(vma);
}

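/*
 * Cap the amount of copying done per move_ptes() call, so the pte locks
 * are dropped and cond_resched() gets a chance to run at a reasonable
 * rate even when moving very large mappings.
 */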
#define LATENCY_LIMIT   (64 * PAGE_SIZE)

unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len,
                bool need_rmap_locks)
{
        unsigned long extent, next, old_end;
        pmd_t *old_pmd, *new_pmd;
        bool need_flush = false;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */

        old_end = old_addr + len;
        flush_cache_range(vma, old_addr, old_end);

        mmun_start = old_addr;
        mmun_end = old_end;
        mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

        for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
                cond_resched();
                next = (old_addr + PMD_SIZE) & PMD_MASK;
                /* even if next overflowed, extent below will be ok */
                extent = next - old_addr;
                if (extent > old_end - old_addr)
                        extent = old_end - old_addr;
                old_pmd = get_old_pmd(vma->vm_mm, old_addr);
                if (!old_pmd)
                        continue;
                new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
                if (!new_pmd)
                        break;
                if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd)) {
                        if (extent == HPAGE_PMD_SIZE) {
                                bool moved;
                                /* See comment in move_ptes() */
                                if (need_rmap_locks)
                                        take_rmap_locks(vma);
                                moved = move_huge_pmd(vma, old_addr, new_addr,
                                                    old_end, old_pmd, new_pmd,
                                                    &need_flush);
                                if (need_rmap_locks)
                                        drop_rmap_locks(vma);
                                if (moved)
                                        continue;
                        }
                        split_huge_pmd(vma, old_pmd, old_addr);
                        if (pmd_trans_unstable(old_pmd))
                                continue;
                }
                if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
                        break;
                next = (new_addr + PMD_SIZE) & PMD_MASK;
                if (extent > next - new_addr)
                        extent = next - new_addr;
                if (extent > LATENCY_LIMIT)
                        extent = LATENCY_LIMIT;
                move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
                          new_pmd, new_addr, need_rmap_locks, &need_flush);
        }
        if (need_flush)
                flush_tlb_range(vma, old_end-len, old_addr);

        mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

        return len + old_addr - old_end;        /* how much done */
}

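/*
 * Relocate vma to [new_addr, new_addr + new_len): set up new_vma via
 * copy_vma(), move the page tables across, then unmap the old range,
 * keeping VM_ACCOUNT/VM_LOCKED accounting consistent throughout.
 * Returns the new address on success, or a negative error code.
 */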
static unsigned long move_vma(struct vm_area_struct *vma,
                unsigned long old_addr, unsigned long old_len,
                unsigned long new_len, unsigned long new_addr,
                bool *locked, struct vm_userfaultfd_ctx *uf,
                struct list_head *uf_unmap)
{
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *new_vma;
        unsigned long vm_flags = vma->vm_flags;
        unsigned long new_pgoff;
        unsigned long moved_len;
        unsigned long excess = 0;
        unsigned long hiwater_vm;
        int split = 0;
        int err;
        bool need_rmap_locks;

        /*
         * We'd prefer to avoid failure later on in do_munmap:
         * which may split one vma into three before unmapping.
         */
        if (mm->map_count >= sysctl_max_map_count - 3)
                return -ENOMEM;

        /*
         * Advise KSM to break any KSM pages in the area to be moved:
         * it would be confusing if they were to turn up at the new
         * location, where they happen to coincide with different KSM
         * pages recently unmapped.  But leave vma->vm_flags as it was,
         * so KSM can come around to merge on vma and new_vma afterwards.
         */
        err = ksm_madvise(vma, old_addr, old_addr + old_len,
                                                MADV_UNMERGEABLE, &vm_flags);
        if (err)
                return err;

        new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
        new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
                           &need_rmap_locks);
        if (!new_vma)
                return -ENOMEM;

        moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
                                     need_rmap_locks);
        if (moved_len < old_len) {
                err = -ENOMEM;
        } else if (vma->vm_ops && vma->vm_ops->mremap) {
                err = vma->vm_ops->mremap(new_vma);
        }

        if (unlikely(err)) {
                /*
                 * On error, move entries back from new area to old,
                 * which will succeed since page tables still there,
                 * and then proceed to unmap new area instead of old.
                 */
                move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
                                 true);
                vma = new_vma;
                old_len = new_len;
                old_addr = new_addr;
                new_addr = err;
        } else {
                mremap_userfaultfd_prep(new_vma, uf);
                arch_remap(mm, old_addr, old_addr + old_len,
                           new_addr, new_addr + new_len);
        }

        /* Conceal VM_ACCOUNT so old reservation is not undone */
        if (vm_flags & VM_ACCOUNT) {
                vma->vm_flags &= ~VM_ACCOUNT;
                excess = vma->vm_end - vma->vm_start - old_len;
                if (old_addr > vma->vm_start &&
                    old_addr + old_len < vma->vm_end)
                        split = 1;
        }

        /*
         * If we failed to move page tables we still do total_vm increment
         * since do_munmap() will decrement it by old_len == new_len.
         *
         * Since total_vm is about to be raised artificially high for a
         * moment, we need to restore high watermark afterwards: if stats
         * are taken meanwhile, total_vm and hiwater_vm appear too high.
         * If this were a serious issue, we'd add a flag to do_munmap().
         */
        hiwater_vm = mm->hiwater_vm;
        vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

        /* Tell pfnmap has moved from this vma */
        if (unlikely(vma->vm_flags & VM_PFNMAP))
                untrack_pfn_moved(vma);

        if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
                /* OOM: unable to split vma, just get accounts right */
                vm_unacct_memory(excess >> PAGE_SHIFT);
                excess = 0;
        }
        mm->hiwater_vm = hiwater_vm;

        /* Restore VM_ACCOUNT if one or two pieces of vma left */
        if (excess) {
                vma->vm_flags |= VM_ACCOUNT;
                if (split)
                        vma->vm_next->vm_flags |= VM_ACCOUNT;
        }

        if (vm_flags & VM_LOCKED) {
                mm->locked_vm += new_len >> PAGE_SHIFT;
                *locked = true;
        }

        return new_addr;
}

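/*
 * Validate that the range at addr can be resized from old_len to
 * new_len: the vma must cover the whole old range and be expandable
 * (no hugetlb, VM_DONTEXPAND or VM_PFNMAP), and the growth must fit
 * within the mlock and commit limits.  On success, any newly charged
 * memory is recorded in *p for the caller to unaccount on failure.
 */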
static struct vm_area_struct *vma_to_resize(unsigned long addr,
        unsigned long old_len, unsigned long new_len, unsigned long *p)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = find_vma(mm, addr);
        unsigned long pgoff;

        if (!vma || vma->vm_start > addr)
                return ERR_PTR(-EFAULT);

        /*
         * !old_len is a special case where an attempt is made to 'duplicate'
         * a mapping.  This makes no sense for private mappings as it will
         * instead create a fresh/new mapping unrelated to the original.  This
         * is contrary to the basic idea of mremap which creates new mappings
         * based on the original.  There are no known use cases for this
         * behavior.  As a result, fail such attempts.
         */
        if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
                pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap.  This is not supported.\n", current->comm, current->pid);
                return ERR_PTR(-EINVAL);
        }

        if (is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        /* We can't remap across vm area boundaries */
        if (old_len > vma->vm_end - addr)
                return ERR_PTR(-EFAULT);

        if (new_len == old_len)
                return vma;

        /* Need to be careful about a growing mapping */
        pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
                return ERR_PTR(-EINVAL);

        if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
                return ERR_PTR(-EFAULT);

        if (vma->vm_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;
                locked = mm->locked_vm << PAGE_SHIFT;
                lock_limit = rlimit(RLIMIT_MEMLOCK);
                locked += new_len - old_len;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        return ERR_PTR(-EAGAIN);
        }

        if (!may_expand_vm(mm, vma->vm_flags,
                                (new_len - old_len) >> PAGE_SHIFT))
                return ERR_PTR(-ENOMEM);

        if (vma->vm_flags & VM_ACCOUNT) {
                unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
                if (security_vm_enough_memory_mm(mm, charged))
                        return ERR_PTR(-ENOMEM);
                *p = charged;
        }

        return vma;
}

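/*
 * Handle the MREMAP_FIXED case: unmap whatever currently occupies the
 * destination, shrink the source first if it is also being truncated,
 * then move the vma to the caller-supplied address via move_vma().
 */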
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
                unsigned long new_addr, unsigned long new_len, bool *locked,
                struct vm_userfaultfd_ctx *uf,
                struct list_head *uf_unmap_early,
                struct list_head *uf_unmap)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;
        unsigned long map_flags;

        if (offset_in_page(new_addr))
                goto out;

        if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
                goto out;

        /* Ensure the old/new locations do not overlap */
        if (addr + old_len > new_addr && new_addr + new_len > addr)
                goto out;

        ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
        if (ret)
                goto out;

        if (old_len >= new_len) {
                ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
                if (ret && old_len != new_len)
                        goto out;
                old_len = new_len;
        }

        vma = vma_to_resize(addr, old_len, new_len, &charged);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out;
        }

        map_flags = MAP_FIXED;
        if (vma->vm_flags & VM_MAYSHARE)
                map_flags |= MAP_SHARED;

        ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
                                ((addr - vma->vm_start) >> PAGE_SHIFT),
                                map_flags);
        if (offset_in_page(ret))
                goto out1;

        ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, uf,
                       uf_unmap);
        if (!(offset_in_page(ret)))
                goto out;
out1:
        vm_unacct_memory(charged);

out:
        return ret;
}

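/*
 * Can this vma grow in place by delta bytes?  True only if the extended
 * range doesn't wrap, doesn't run into the next vma, and the architecture
 * raises no objection through get_unmapped_area(MAP_FIXED).
 */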
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
        unsigned long end = vma->vm_end + delta;
        if (end < vma->vm_end) /* overflow */
                return 0;
        if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
                return 0;
        if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
                              0, MAP_FIXED) & ~PAGE_MASK)
                return 0;
        return 1;
}

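/*
 * Illustrative userspace use of the syscall implemented below (assumes
 * the usual libc mremap() wrapper; not part of this file's build):
 *
 *      void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *      void *q = mremap(p, 4096, 2 * 4096, MREMAP_MAYMOVE);
 *      if (q == MAP_FAILED)
 *              perror("mremap");
 */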
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
                unsigned long, new_len, unsigned long, flags,
                unsigned long, new_addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;
        bool locked = false;
        struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
        LIST_HEAD(uf_unmap_early);
        LIST_HEAD(uf_unmap);

        if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
                return ret;

        if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
                return ret;

        if (offset_in_page(addr))
                return ret;

        old_len = PAGE_ALIGN(old_len);
        new_len = PAGE_ALIGN(new_len);

        /*
         * We allow a zero old-len as a special case
         * for DOS-emu "duplicate shm area" thing. But
         * a zero new-len is nonsensical.
         */
        if (!new_len)
                return ret;

        if (down_write_killable(&current->mm->mmap_sem))
                return -EINTR;

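        /*
         * From here on we hold mmap_sem for writing; every path below
         * must exit through the out label so it is dropped exactly once.
         */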
        if (flags & MREMAP_FIXED) {
                ret = mremap_to(addr, old_len, new_addr, new_len,
                                &locked, &uf, &uf_unmap_early, &uf_unmap);
                goto out;
        }

        /*
         * Always allow a shrinking remap: that just unmaps
         * the unnecessary pages..
         * do_munmap does all the needed commit accounting
         */
        if (old_len >= new_len) {
                ret = do_munmap(mm, addr+new_len, old_len - new_len, &uf_unmap);
                if (ret && old_len != new_len)
                        goto out;
                ret = addr;
                goto out;
        }

        /*
         * Ok, we need to grow..
         */
        vma = vma_to_resize(addr, old_len, new_len, &charged);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out;
        }

        /* old_len exactly to the end of the area..
         */
        if (old_len == vma->vm_end - addr) {
                /* can we just expand the current mapping? */
                if (vma_expandable(vma, new_len - old_len)) {
                        int pages = (new_len - old_len) >> PAGE_SHIFT;

                        if (vma_adjust(vma, vma->vm_start, addr + new_len,
                                       vma->vm_pgoff, NULL)) {
                                ret = -ENOMEM;
                                goto out;
                        }

                        vm_stat_account(mm, vma->vm_flags, pages);
                        if (vma->vm_flags & VM_LOCKED) {
                                mm->locked_vm += pages;
                                locked = true;
                                new_addr = addr;
                        }
                        ret = addr;
                        goto out;
                }
        }

        /*
         * We weren't able to just expand or shrink the area,
         * we need to create a new one and move it..
         */
        ret = -ENOMEM;
        if (flags & MREMAP_MAYMOVE) {
                unsigned long map_flags = 0;
                if (vma->vm_flags & VM_MAYSHARE)
                        map_flags |= MAP_SHARED;

                new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
                                        vma->vm_pgoff +
                                        ((addr - vma->vm_start) >> PAGE_SHIFT),
                                        map_flags);
                if (offset_in_page(new_addr)) {
                        ret = new_addr;
                        goto out;
                }

                ret = move_vma(vma, addr, old_len, new_len, new_addr,
                               &locked, &uf, &uf_unmap);
        }
out:
        if (offset_in_page(ret)) {
                vm_unacct_memory(charged);
                locked = false;
        }
        up_write(&current->mm->mmap_sem);
        if (locked && new_len > old_len)
                mm_populate(new_addr + old_len, new_len - old_len);
        userfaultfd_unmap_complete(mm, &uf_unmap_early);
        mremap_userfaultfd_complete(&uf, addr, new_addr, old_len);
        userfaultfd_unmap_complete(mm, &uf_unmap);
        return ret;
}