// SPDX-License-Identifier: GPL-2.0
/*
 * mm/mremap.c
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
 * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>

#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>

#include "internal.h"

static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	return pud;
}

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = get_old_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(mm, addr);
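	/*
	 * Unlike get_old_pud(), allocate any page-table levels that are
	 * still missing on the way down to the destination pud.
	 */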
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;

	return pud_alloc(mm, p4d, addr);
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = alloc_new_pud(mm, vma, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}

static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with page_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (need_rmap_locks)
		drop_rmap_locks(vma);
}

#ifndef arch_supports_page_table_move
#define arch_supports_page_table_move arch_supports_page_table_move
static inline bool arch_supports_page_table_move(void)
{
	return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
		IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
}
#endif

#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmd;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd. But if we first
	 * have a few 4kB-only pages that get moved down, and then
	 * hit the "now the rest is PMD-aligned, let's do everything
	 * one pmd at a time", we will still have the old (now empty
	 * of any 4kB pages, but still there) PMD in the page table
	 * tree.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't move
	 * this pmd".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pmd */
	pmd = *old_pmd;
	pmd_clear(old_pmd);

	VM_BUG_ON(!pmd_none(*new_pmd));

	pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pmd(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
		pmd_t *new_pmd)
{
	return false;
}
#endif

#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	pud_populate(mm, new_pud, pud_pgtable(pud));
	flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pud(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
		pud_t *new_pud)
{
	return false;
}
#endif

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	/* Set the new pud */
	/* mark soft_dirty when we add pud level soft dirty support */
	set_pud_at(mm, new_addr, new_pud, pud);
	flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	WARN_ON_ONCE(1);
	return false;
}
#endif

enum pgt_entry {
	NORMAL_PMD,
	HPAGE_PMD,
	NORMAL_PUD,
	HPAGE_PUD,
};

/*
 * Returns an extent of the corresponding size for the pgt_entry specified if
 * valid. Else returns a smaller extent bounded by the end of the source and
 * destination pgt_entry.
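 *
 * For example (assuming x86-64 with 4KiB pages, i.e. PMD_SIZE == 2MiB):
 * for NORMAL_PMD with old_addr == 0x1234000, old_end == 0x1834000 and
 * new_addr == 0x7654000, the source side allows 0x1cc000 bytes before its
 * next PMD boundary but the destination side only 0x1ac000, so the
 * returned extent is 0x1ac000.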
 */
static __always_inline unsigned long get_extent(enum pgt_entry entry,
			unsigned long old_addr, unsigned long old_end,
			unsigned long new_addr)
{
	unsigned long next, extent, mask, size;

	switch (entry) {
	case HPAGE_PMD:
	case NORMAL_PMD:
		mask = PMD_MASK;
		size = PMD_SIZE;
		break;
	case HPAGE_PUD:
	case NORMAL_PUD:
		mask = PUD_MASK;
		size = PUD_SIZE;
		break;
	default:
		BUILD_BUG();
		break;
	}

	next = (old_addr + size) & mask;
	/* even if next overflowed, extent below will be ok */
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}

/*
 * Attempts to speed up the move by moving the entry at the level
 * corresponding to pgt_entry. Returns true if the move was successful,
 * else false.
 */
static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
			unsigned long old_addr, unsigned long new_addr,
			void *old_entry, void *new_entry, bool need_rmap_locks)
{
	bool moved = false;

	/* See comment in move_ptes() */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case HPAGE_PMD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pmd(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	case HPAGE_PUD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pud(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(vma);

	return moved;
}

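/*
 * Move the page tables for [old_addr, old_addr + len) in @vma over to
 * @new_addr in @new_vma.  Each iteration of the loop below first tries to
 * move a whole PUD (huge or normal), then a whole PMD, and only then falls
 * back to copying individual PTEs with move_ptes().  Hugetlb VMAs are
 * handed off to move_hugetlb_page_tables().  The return value is the
 * number of bytes actually moved, which can be less than @len if a
 * destination page-table allocation fails.
 */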
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;
	pud_t *old_pud, *new_pud;

	if (!len)
		return 0;

	old_end = old_addr + len;

	if (is_vm_hugetlb_page(vma))
		return move_hugetlb_page_tables(vma, new_vma, old_addr,
						new_addr, len);

	flush_cache_range(vma, old_addr, old_end);
	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		/*
		 * If extent is PUD-sized try to speed up the move by moving at the
		 * PUD level if possible.
		 */
		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);

		old_pud = get_old_pud(vma->vm_mm, old_addr);
		if (!old_pud)
			continue;
		new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
		if (!new_pud)
			break;
		if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
			if (extent == HPAGE_PUD_SIZE) {
				move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
					       old_pud, new_pud, need_rmap_locks);
				/* We ignore and continue on error? */
				continue;
			}
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
					   old_pud, new_pud, true))
				continue;
		}

		extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
		    pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE &&
			    move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, need_rmap_locks))
				continue;
			split_huge_pmd(vma, old_pmd, old_addr);
			if (pmd_trans_unstable(old_pmd))
				continue;
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
			   extent == PMD_SIZE) {
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, true))
				continue;
		}

		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
			  new_pmd, new_addr, need_rmap_locks);
	}

	mmu_notifier_invalidate_range_end(&range);

	return len + old_addr - old_end;	/* how much done */
}

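/*
 * move_vma() backs the "move" paths of mremap(): it sets up a new vma at
 * @new_addr with copy_vma(), shifts the page tables across with
 * move_page_tables(), moves them back again if the move or the vma's
 * ->mremap callback fails, and finally unmaps the old range (unless
 * MREMAP_DONTUNMAP was requested, in which case the old vma is kept in
 * place).
 */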
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	long to_account = new_len - old_len;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long account_start = 0;
	unsigned long account_end = 0;
	unsigned long hiwater_vm;
	int err = 0;
	bool need_rmap_locks;
	struct vma_iterator vmi;

	/*
	 * We'd prefer to avoid failure later on in do_munmap,
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (unlikely(flags & MREMAP_DONTUNMAP))
		to_account = new_len;

	if (vma->vm_ops && vma->vm_ops->may_split) {
		if (vma->vm_start != old_addr)
			err = vma->vm_ops->may_split(vma, old_addr);
		if (!err && vma->vm_end != old_addr + old_len)
			err = vma->vm_ops->may_split(vma, old_addr + old_len);
		if (err)
			return err;
	}

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	if (vm_flags & VM_ACCOUNT) {
		if (security_vm_enough_memory_mm(mm, to_account >> PAGE_SHIFT))
			return -ENOMEM;
	}

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma) {
		if (vm_flags & VM_ACCOUNT)
			vm_unacct_memory(to_account >> PAGE_SHIFT);
		return -ENOMEM;
	}

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since the page tables are still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
	}

	if (is_vm_hugetlb_page(vma)) {
		clear_vma_resv_huge_pages(vma);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
		vm_flags_clear(vma, VM_ACCOUNT);
		if (vma->vm_start < old_addr)
			account_start = vma->vm_start;
		if (vma->vm_end > old_addr + old_len)
			account_end = vma->vm_end;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell the pfn tracking code that the pfnmap has moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_moved(vma);

	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
		vm_flags_clear(vma, VM_LOCKED_MASK);

		/*
		 * The anon_vma links of the old vma are no longer needed
		 * after its page table has been moved.
		 */
		if (new_vma != vma && vma->vm_start == old_addr &&
			vma->vm_end == (old_addr + old_len))
			unlink_anon_vmas(vma);

		/* Because we won't unmap we don't need to touch locked_vm */
		return new_addr;
	}

	vma_iter_init(&vmi, mm, old_addr);
	if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
			vm_acct_memory(old_len >> PAGE_SHIFT);
		account_start = account_end = 0;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (account_start) {
		vma = vma_prev(&vmi);
		vm_flags_set(vma, VM_ACCOUNT);
	}

	if (account_end) {
		vma = vma_next(&vmi);
		vm_flags_set(vma, VM_ACCOUNT);
	}

	return new_addr;
}

static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long pgoff;

	vma = vma_lookup(mm, addr);
	if (!vma)
		return ERR_PTR(-EFAULT);

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping.  This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original.  This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original.  There are no known use cases for this
	 * behavior.  As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap.  This is not supported.\n", current->comm, current->pid);
		return ERR_PTR(-EINVAL);
	}

	if ((flags & MREMAP_DONTUNMAP) &&
			(vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (mlock_future_check(mm, vma->vm_flags, new_len - old_len))
		return ERR_PTR(-EAGAIN);

	if (!may_expand_vm(mm, vma->vm_flags,
				(new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	return vma;
}

static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		unsigned long flags, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long map_flags = 0;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	/*
	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmapped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the vma's after it gets -ENOMEM.
	 * So, to avoid such a scenario we can pre-compute whether the whole
	 * operation has a good chance of succeeding map-wise.
	 * The worst case is when both vma's (new_addr and old_addr) get
	 * split in 3 before unmapping them.
	 * That means 2 more maps (1 for each) to the ones we already hold.
	 * Check whether the current map count plus 2 still leads us to 4 maps
	 * below the threshold, otherwise return -ENOMEM here to be safe.
	 */
	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (flags & MREMAP_FIXED) {
		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
		if (ret)
			goto out;
	}

	if (old_len > new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, flags);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (flags & MREMAP_DONTUNMAP &&
		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;

	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (IS_ERR_VALUE(ret))
		goto out;

	/* We got a new mapping */
	if (!(flags & MREMAP_FIXED))
		new_addr = ret;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
		       uf_unmap);

out:
	return ret;
}

static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;

	if (end < vma->vm_end) /* overflow */
		return 0;
	if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
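 *
 * MREMAP_DONTUNMAP leaves the old mapping in place instead of unmapping
 * it; it is only accepted together with MREMAP_MAYMOVE and with
 * old_len == new_len (see the flag checks below).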
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	bool locked = false;
	bool downgraded = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arm64/tagged-address-abi.rst for more information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	/*
	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
	 * in the process.
	 */
	if (flags & MREMAP_DONTUNMAP &&
			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	vma = vma_lookup(mm, addr);
	if (!vma) {
		ret = -EFAULT;
		goto out;
	}

	if (is_vm_hugetlb_page(vma)) {
		struct hstate *h __maybe_unused = hstate_vma(vma);

		old_len = ALIGN(old_len, huge_page_size(h));
		new_len = ALIGN(new_len, huge_page_size(h));

		/* addrs must be huge page aligned */
		if (addr & ~huge_page_mask(h))
			goto out;
		if (new_addr & ~huge_page_mask(h))
			goto out;

		/*
		 * Don't allow remap expansion, because the underlying hugetlb
		 * reservation is not yet capable of handling split reservations.
		 */
		if (new_len > old_len)
			goto out;
	}

	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, flags, &uf, &uf_unmap_early,
				&uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_vmi_munmap does all the needed commit accounting, and
	 * downgrades mmap_lock to read if so directed.
	 */
	if (old_len >= new_len) {
		int retval;
		VMA_ITERATOR(vmi, mm, addr + new_len);

		retval = do_vmi_munmap(&vmi, mm, addr + new_len,
				       old_len - new_len, &uf_unmap, true);
		/* Returning 1 indicates mmap_lock is downgraded to read. */
		if (retval == 1) {
			downgraded = true;
		} else if (retval < 0 && old_len != new_len) {
			ret = retval;
			goto out;
		}

		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, flags);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

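	/*
	 * Growing is attempted in place first: if the vma runs exactly up to
	 * old_addr + old_len and nothing is mapped in the way, it can simply
	 * be extended.  Otherwise we fall through to the MREMAP_MAYMOVE path
	 * below and relocate the whole mapping.
	 */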
	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			long pages = (new_len - old_len) >> PAGE_SHIFT;
			unsigned long extension_start = addr + old_len;
			unsigned long extension_end = addr + new_len;
			pgoff_t extension_pgoff = vma->vm_pgoff +
				((extension_start - vma->vm_start) >> PAGE_SHIFT);
			VMA_ITERATOR(vmi, mm, extension_start);

			if (vma->vm_flags & VM_ACCOUNT) {
				if (security_vm_enough_memory_mm(mm, pages)) {
					ret = -ENOMEM;
					goto out;
				}
			}

			/*
			 * vma_merge() is called on the extension we are
			 * adding to the already existing vma; it will merge
			 * this extension with the already existing vma
			 * (expand operation itself) and possibly also with
			 * the next vma if it becomes adjacent to the expanded
			 * vma and otherwise compatible.
			 *
			 * However, vma_merge() can currently fail due to
			 * is_mergeable_vma() check for vm_ops->close (see the
			 * comment there). Yet this should not prevent vma
			 * expanding, so perform a simple expand for such vma.
			 * Ideally the check for close op should be only done
			 * when a vma would be actually removed due to a merge.
			 */
			if (!vma->vm_ops || !vma->vm_ops->close) {
				vma = vma_merge(&vmi, mm, vma, extension_start,
					extension_end, vma->vm_flags, vma->anon_vma,
					vma->vm_file, extension_pgoff, vma_policy(vma),
					vma->vm_userfaultfd_ctx, anon_vma_name(vma));
			} else if (vma_expand(&vmi, vma, vma->vm_start,
					addr + new_len, vma->vm_pgoff, NULL)) {
				vma = NULL;
			}
			if (!vma) {
				vm_unacct_memory(pages);
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;

		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (IS_ERR_VALUE(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret))
		locked = false;
	if (downgraded)
		mmap_read_unlock(current->mm);
	else
		mmap_write_unlock(current->mm);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}