// SPDX-License-Identifier: GPL-2.0
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>

#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>

#include "internal.h"

static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	return pud;
}

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = get_old_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

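/*
 * The get_old_*() helpers above only walk the existing source page tables
 * and return NULL when a level is missing, while the alloc_new_*() helpers
 * below allocate any missing levels at the destination, so a move can
 * populate a previously untouched part of the address space.
 */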
static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;

	return pud_alloc(mm, p4d, addr);
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = alloc_new_pud(mm, vma, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}

static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;
	int err = 0;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	if (!old_pte) {
		err = -EAGAIN;
		goto out;
	}
	new_pte = pte_offset_map_nolock(mm, new_pmd, new_addr, &new_ptl);
	if (!new_pte) {
		pte_unmap_unlock(old_pte, old_ptl);
		err = -EAGAIN;
		goto out;
	}
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(ptep_get(old_pte)))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with page_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
out:
	if (need_rmap_locks)
		drop_rmap_locks(vma);
	return err;
}

#ifndef arch_supports_page_table_move
#define arch_supports_page_table_move arch_supports_page_table_move
static inline bool arch_supports_page_table_move(void)
{
	return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
		IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
}
#endif

#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmd;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd. But if we first
	 * have a few 4kB-only pages that get moved down, and then
	 * hit the "now the rest is PMD-aligned, let's do everything
	 * one pmd at a time", we will still have the old (now empty
	 * of any 4kB pages, but still there) PMD in the page table
	 * tree.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't move
	 * this pmd".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pmd */
	pmd = *old_pmd;
	pmd_clear(old_pmd);

	VM_BUG_ON(!pmd_none(*new_pmd));

	pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pmd(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
		pmd_t *new_pmd)
{
	return false;
}
#endif

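/*
 * move_normal_pud() below mirrors move_normal_pmd() one level up: rather
 * than copying individual entries, the lower-level page table page is
 * re-parented under the destination entry, so a single successful move
 * covers PUD_SIZE of address space (typically 1GB with 4KB pages).
 */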
#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	pud_populate(mm, new_pud, pud_pgtable(pud));
	flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pud(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
		pud_t *new_pud)
{
	return false;
}
#endif

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	/* Set the new pud */
	/* mark soft_dirty when we add pud level soft dirty support */
	set_pud_at(mm, new_addr, new_pud, pud);
	flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	WARN_ON_ONCE(1);
	return false;

}
#endif

enum pgt_entry {
	NORMAL_PMD,
	HPAGE_PMD,
	NORMAL_PUD,
	HPAGE_PUD,
};

/*
 * Returns an extent of the corresponding size for the pgt_entry specified if
 * valid. Else returns a smaller extent bounded by the end of the source and
 * destination pgt_entry.
 */
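/*
 * For illustration (4KB pages, 2MB PMDs): with old_addr == 0x1ff000,
 * old_end == 0x600000 and new_addr == 0x7ff000, the first NORMAL_PMD extent
 * is 0x1000, just enough to reach the next PMD boundary; after that both
 * addresses are PMD aligned and subsequent extents are a full PMD_SIZE.
 */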
static __always_inline unsigned long get_extent(enum pgt_entry entry,
			unsigned long old_addr, unsigned long old_end,
			unsigned long new_addr)
{
	unsigned long next, extent, mask, size;

	switch (entry) {
	case HPAGE_PMD:
	case NORMAL_PMD:
		mask = PMD_MASK;
		size = PMD_SIZE;
		break;
	case HPAGE_PUD:
	case NORMAL_PUD:
		mask = PUD_MASK;
		size = PUD_SIZE;
		break;
	default:
		BUILD_BUG();
		break;
	}

	next = (old_addr + size) & mask;
	/* even if next overflowed, extent below will be ok */
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}

/*
 * Attempts to speedup the move by moving entry at the level corresponding to
 * pgt_entry. Returns true if the move was successful, else false.
 */
static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
			unsigned long old_addr, unsigned long new_addr,
			void *old_entry, void *new_entry, bool need_rmap_locks)
{
	bool moved = false;

	/* See comment in move_ptes() */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case HPAGE_PMD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pmd(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	case HPAGE_PUD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pud(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(vma);

	return moved;
}

/*
 * A helper to check if aligning down is OK. The aligned address should fall
 * on *no mapping*. For the stack moving down, that's a special move within
 * the VMA that is created to span the source and destination of the move,
 * so we make an exception for it.
 */
static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_align,
			    unsigned long mask, bool for_stack)
{
	unsigned long addr_masked = addr_to_align & mask;

	/*
	 * If @addr_to_align of either source or destination is not the beginning
	 * of the corresponding VMA, we can't align down or we will destroy part
	 * of the current mapping.
	 */
	if (!for_stack && vma->vm_start != addr_to_align)
		return false;

	/* In the stack case we explicitly permit in-VMA alignment. */
	if (for_stack && addr_masked >= vma->vm_start)
		return true;

	/*
	 * Make sure the realignment doesn't cause the address to fall on an
	 * existing mapping.
	 */
	return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL;
}

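/*
 * Example (4KB pages, 2MB PMDs): a move from 0x2ff000 to 0x4ff000 has both
 * addresses at the same offset (0xff000) within their PMDs, so they can be
 * realigned down to 0x200000 and 0x400000 and copied PMD by PMD, provided
 * can_align_down() confirms nothing else is mapped below each address.
 */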
/* Opportunistically realign to specified boundary for faster copy. */
static void try_realign_addr(unsigned long *old_addr, struct vm_area_struct *old_vma,
			     unsigned long *new_addr, struct vm_area_struct *new_vma,
			     unsigned long mask, bool for_stack)
{
	/* Skip if the addresses are already aligned. */
	if ((*old_addr & ~mask) == 0)
		return;

	/* Only realign if the new and old addresses are mutually aligned. */
	if ((*old_addr & ~mask) != (*new_addr & ~mask))
		return;

	/* Ensure realignment doesn't cause overlap with existing mappings. */
	if (!can_align_down(old_vma, *old_addr, mask, for_stack) ||
	    !can_align_down(new_vma, *new_addr, mask, for_stack))
		return;

	*old_addr = *old_addr & mask;
	*new_addr = *new_addr & mask;
}

unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks, bool for_stack)
{
	unsigned long extent, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;
	pud_t *old_pud, *new_pud;

	if (!len)
		return 0;

	old_end = old_addr + len;

	if (is_vm_hugetlb_page(vma))
		return move_hugetlb_page_tables(vma, new_vma, old_addr,
						new_addr, len);

	/*
	 * If possible, realign addresses to PMD boundary for faster copy.
	 * Only realign if the mremap copying hits a PMD boundary.
	 */
	if (len >= PMD_SIZE - (old_addr & ~PMD_MASK))
		try_realign_addr(&old_addr, vma, &new_addr, new_vma, PMD_MASK,
				 for_stack);

	flush_cache_range(vma, old_addr, old_end);
	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		/*
		 * If extent is PUD-sized try to speed up the move by moving at the
		 * PUD level if possible.
		 */
		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);

		old_pud = get_old_pud(vma->vm_mm, old_addr);
		if (!old_pud)
			continue;
		new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
		if (!new_pud)
			break;
		if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
			if (extent == HPAGE_PUD_SIZE) {
				move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
					       old_pud, new_pud, need_rmap_locks);
				/* We ignore and continue on error? */
				continue;
			}
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {

			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
					   old_pud, new_pud, true))
				continue;
		}

		extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
again:
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
		    pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE &&
			    move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, need_rmap_locks))
				continue;
			split_huge_pmd(vma, old_pmd, old_addr);
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
			   extent == PMD_SIZE) {
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, true))
				continue;
		}
		if (pmd_none(*old_pmd))
			continue;
		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		if (move_ptes(vma, old_pmd, old_addr, old_addr + extent,
			      new_vma, new_pmd, new_addr, need_rmap_locks) < 0)
			goto again;
	}

	mmu_notifier_invalidate_range_end(&range);

	/*
	 * Prevent negative return values when {old,new}_addr was realigned
	 * but we broke out of the above loop for the first PMD itself.
	 */
	if (len + old_addr < old_end)
		return 0;

	return len + old_addr - old_end;	/* how much done */
}

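/*
 * move_vma() drives a complete move for mremap(): it copies the VMA to the
 * new range, moves the page tables with move_page_tables(), moves them back
 * on error, and then (unless MREMAP_DONTUNMAP was given) unmaps the old
 * range and fixes up the locked and accounted memory statistics.
 */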
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	long to_account = new_len - old_len;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long account_start = 0;
	unsigned long account_end = 0;
	unsigned long hiwater_vm;
	int err = 0;
	bool need_rmap_locks;
	struct vma_iterator vmi;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (unlikely(flags & MREMAP_DONTUNMAP))
		to_account = new_len;

	if (vma->vm_ops && vma->vm_ops->may_split) {
		if (vma->vm_start != old_addr)
			err = vma->vm_ops->may_split(vma, old_addr);
		if (!err && vma->vm_end != old_addr + old_len)
			err = vma->vm_ops->may_split(vma, old_addr + old_len);
		if (err)
			return err;
	}

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped. But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	if (vm_flags & VM_ACCOUNT) {
		if (security_vm_enough_memory_mm(mm, to_account >> PAGE_SHIFT))
			return -ENOMEM;
	}

	vma_start_write(vma);
	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma) {
		if (vm_flags & VM_ACCOUNT)
			vm_unacct_memory(to_account >> PAGE_SHIFT);
		return -ENOMEM;
	}

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks, false);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true, false);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
	}

	if (is_vm_hugetlb_page(vma)) {
		clear_vma_resv_huge_pages(vma);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
		vm_flags_clear(vma, VM_ACCOUNT);
		if (vma->vm_start < old_addr)
			account_start = vma->vm_start;
		if (vma->vm_end > old_addr + old_len)
			account_end = vma->vm_end;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell pfnmap has moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_clear(vma);

	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
		vm_flags_clear(vma, VM_LOCKED_MASK);

		/*
		 * anon_vma links of the old vma are no longer needed after its page
		 * table has been moved.
		 */
		if (new_vma != vma && vma->vm_start == old_addr &&
			vma->vm_end == (old_addr + old_len))
			unlink_anon_vmas(vma);

		/* Because we won't unmap we don't need to touch locked_vm */
		return new_addr;
	}

	vma_iter_init(&vmi, mm, old_addr);
	if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
			vm_acct_memory(old_len >> PAGE_SHIFT);
		account_start = account_end = 0;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (account_start) {
		vma = vma_prev(&vmi);
		vm_flags_set(vma, VM_ACCOUNT);
	}

	if (account_end) {
		vma = vma_next(&vmi);
		vm_flags_set(vma, VM_ACCOUNT);
	}

	return new_addr;
}

static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long pgoff;

	vma = vma_lookup(mm, addr);
	if (!vma)
		return ERR_PTR(-EFAULT);

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping. This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original. This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original. There are no known use cases for this
	 * behavior. As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n", current->comm, current->pid);
		return ERR_PTR(-EINVAL);
	}

	if ((flags & MREMAP_DONTUNMAP) &&
			(vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (!mlock_future_ok(mm, vma->vm_flags, new_len - old_len))
		return ERR_PTR(-EAGAIN);

	if (!may_expand_vm(mm, vma->vm_flags,
				(new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	return vma;
}

static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		unsigned long flags, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long map_flags = 0;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	/*
	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmapped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the vma's after it gets -ENOMEM.
	 * So, to avoid such scenario we can pre-compute if the whole
	 * operation has high chances to succeed map-wise.
	 * Worst-scenario case is when both vma's (new_addr and old_addr) get
	 * split in 3 before unmapping it.
	 * That means 2 more maps (1 for each) to the ones we already hold.
	 * Check whether current map count plus 2 still leads us to 4 maps below
	 * the threshold, otherwise return -ENOMEM here to be more safe.
	 */
	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (flags & MREMAP_FIXED) {
		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
		if (ret)
			goto out;
	}

	if (old_len > new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, flags);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (flags & MREMAP_DONTUNMAP &&
		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;

	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (IS_ERR_VALUE(ret))
		goto out;

	/* We got a new mapping */
	if (!(flags & MREMAP_FIXED))
		new_addr = ret;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
		       uf_unmap);

out:
	return ret;
}

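/*
 * vma_expandable() checks whether the existing mapping can simply be grown
 * in place by @delta bytes: the new end must not overflow, must not run
 * into a following mapping, and get_unmapped_area() must accept the larger
 * fixed range.
 */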
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;

	if (end < vma->vm_end) /* overflow */
		return 0;
	if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
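/*
 * Illustrative use from userspace: growing a mapping and allowing the
 * kernel to relocate it if it cannot be expanded in place:
 *
 *	void *new = mremap(old, old_size, new_size, MREMAP_MAYMOVE);
 *	if (new == MAP_FAILED)
 *		perror("mremap");
 */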
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	bool locked = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arch/arm64/tagged-address-abi.rst for more
	 * information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	/*
	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
	 * in the process.
	 */
	if (flags & MREMAP_DONTUNMAP &&
			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
		return ret;


	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	vma = vma_lookup(mm, addr);
	if (!vma) {
		ret = -EFAULT;
		goto out;
	}

	if (is_vm_hugetlb_page(vma)) {
		struct hstate *h __maybe_unused = hstate_vma(vma);

		old_len = ALIGN(old_len, huge_page_size(h));
		new_len = ALIGN(new_len, huge_page_size(h));

		/* addrs must be huge page aligned */
		if (addr & ~huge_page_mask(h))
			goto out;
		if (new_addr & ~huge_page_mask(h))
			goto out;

		/*
		 * Don't allow remap expansion, because the underlying hugetlb
		 * reservation is not yet capable to handle split reservation.
		 */
		if (new_len > old_len)
			goto out;
	}

	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, flags, &uf, &uf_unmap_early,
				&uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_vmi_munmap does all the needed commit accounting, and
	 * unlocks the mmap_lock if so directed.
	 */
	if (old_len >= new_len) {
		VMA_ITERATOR(vmi, mm, addr + new_len);

		if (old_len == new_len) {
			ret = addr;
			goto out;
		}

		ret = do_vmi_munmap(&vmi, mm, addr + new_len, old_len - new_len,
				    &uf_unmap, true);
		if (ret)
			goto out;

		ret = addr;
		goto out_unlocked;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, flags);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			long pages = (new_len - old_len) >> PAGE_SHIFT;
			unsigned long extension_start = addr + old_len;
			unsigned long extension_end = addr + new_len;
			pgoff_t extension_pgoff = vma->vm_pgoff +
				((extension_start - vma->vm_start) >> PAGE_SHIFT);
			VMA_ITERATOR(vmi, mm, extension_start);
			long charged = 0;

			if (vma->vm_flags & VM_ACCOUNT) {
				if (security_vm_enough_memory_mm(mm, pages)) {
					ret = -ENOMEM;
					goto out;
				}
				charged = pages;
			}

			/*
			 * Function vma_merge() is called on the extension we
			 * are adding to the already existing vma, vma_merge()
			 * will merge this extension with the already existing
			 * vma (expand operation itself) and possibly also with
			 * the next vma if it becomes adjacent to the expanded
			 * vma and otherwise compatible.
			 */
			vma = vma_merge(&vmi, mm, vma, extension_start,
				extension_end, vma->vm_flags, vma->anon_vma,
				vma->vm_file, extension_pgoff, vma_policy(vma),
				vma->vm_userfaultfd_ctx, anon_vma_name(vma));
			if (!vma) {
				vm_unacct_memory(charged);
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (IS_ERR_VALUE(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret))
		locked = false;
	mmap_write_unlock(current->mm);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
out_unlocked:
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}