// SPDX-License-Identifier: GPL-2.0
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/userfaultfd_k.h>

#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>

#include "internal.h"

static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	return pud;
}

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = get_old_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;

	return pud_alloc(mm, p4d, addr);
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = alloc_new_pud(mm, vma, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}

static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with page_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (need_rmap_locks)
		drop_rmap_locks(vma);
}

#ifndef arch_supports_page_table_move
#define arch_supports_page_table_move arch_supports_page_table_move
static inline bool arch_supports_page_table_move(void)
{
	return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
		IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
}
#endif

#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmd;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd. But if we first
	 * have a few 4kB-only pages that get moved down, and then
	 * hit the "now the rest is PMD-aligned, let's do everything
	 * one pmd at a time", we will still have the old (now empty
	 * of any 4kB pages, but still there) PMD in the page table
	 * tree.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't move
	 * this pmd".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pmd */
	pmd = *old_pmd;
	pmd_clear(old_pmd);

	VM_BUG_ON(!pmd_none(*new_pmd));

	pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pmd(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
		pmd_t *new_pmd)
{
	return false;
}
#endif

#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	pud_populate(mm, new_pud, pud_pgtable(pud));
	flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pud(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
		pud_t *new_pud)
{
	return false;
}
#endif

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	/* Set the new pud */
	/* mark soft_dirty when we add pud level soft dirty support */
	set_pud_at(mm, new_addr, new_pud, pud);
	flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	WARN_ON_ONCE(1);
	return false;
}
#endif

enum pgt_entry {
	NORMAL_PMD,
	HPAGE_PMD,
	NORMAL_PUD,
	HPAGE_PUD,
};

/*
 * Returns an extent of the corresponding size for the pgt_entry specified if
 * valid. Else returns a smaller extent bounded by the end of the source and
 * destination pgt_entry.
 */
static __always_inline unsigned long get_extent(enum pgt_entry entry,
			unsigned long old_addr, unsigned long old_end,
			unsigned long new_addr)
{
	unsigned long next, extent, mask, size;

	switch (entry) {
	case HPAGE_PMD:
	case NORMAL_PMD:
		mask = PMD_MASK;
		size = PMD_SIZE;
		break;
	case HPAGE_PUD:
	case NORMAL_PUD:
		mask = PUD_MASK;
		size = PUD_SIZE;
		break;
	default:
		BUILD_BUG();
		break;
	}

	next = (old_addr + size) & mask;
	/* even if next overflowed, extent below will be ok */
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}
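
/*
 * Worked example (illustrative only, not from the original sources; assumes
 * a 2MB PMD_SIZE as on x86-64): for old_addr = 0x2ff000, old_end = 0x600000,
 * new_addr = 0x500000 and entry == NORMAL_PMD:
 *
 *	next   = (0x2ff000 + 0x200000) & PMD_MASK = 0x400000
 *	extent = 0x400000 - 0x2ff000 = 0x101000  (below old_end - old_addr)
 *	next   = (0x500000 + 0x200000) & PMD_MASK = 0x600000
 *	extent = min(0x101000, 0x600000 - 0x500000) = 0x100000
 *
 * i.e. each iteration advances at most to the next PMD boundary of whichever
 * of the source and destination ranges is reached first.
 */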

/*
 * Attempts to speed up the move by moving entry at the level corresponding to
 * pgt_entry. Returns true if the move was successful, else false.
 */
static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
			unsigned long old_addr, unsigned long new_addr,
			void *old_entry, void *new_entry, bool need_rmap_locks)
{
	bool moved = false;

	/* See comment in move_ptes() */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case HPAGE_PMD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pmd(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	case HPAGE_PUD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pud(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(vma);

	return moved;
}
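
/*
 * Copy the page table entries for [old_addr, old_addr + len) over to the
 * range starting at new_addr.  Each iteration of the loop below tries to
 * move the largest unit both sides allow: a whole PUD (huge or normal),
 * then a whole PMD (huge or normal), and finally falls back to copying
 * individual PTEs with move_ptes().  Returns how much was moved.
 */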
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;
	pud_t *old_pud, *new_pud;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		/*
		 * If extent is PUD-sized try to speed up the move by moving at the
		 * PUD level if possible.
		 */
		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);

		old_pud = get_old_pud(vma->vm_mm, old_addr);
		if (!old_pud)
			continue;
		new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
		if (!new_pud)
			break;
		if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
			if (extent == HPAGE_PUD_SIZE) {
				move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
					       old_pud, new_pud, need_rmap_locks);
				/* We ignore and continue on error? */
				continue;
			}
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
					   old_pud, new_pud, true))
				continue;
		}

		extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
		    pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE &&
			    move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, need_rmap_locks))
				continue;
			split_huge_pmd(vma, old_pmd, old_addr);
			if (pmd_trans_unstable(old_pmd))
				continue;
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
			   extent == PMD_SIZE) {
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, true))
				continue;
		}

		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
			  new_pmd, new_addr, need_rmap_locks);
	}

	mmu_notifier_invalidate_range_end(&range);

	return len + old_addr - old_end;	/* how much done */
}

static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err = 0;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (vma->vm_ops && vma->vm_ops->may_split) {
		if (vma->vm_start != old_addr)
			err = vma->vm_ops->may_split(vma, old_addr);
		if (!err && vma->vm_end != old_addr + old_len)
			err = vma->vm_ops->may_split(vma, old_addr + old_len);
		if (err)
			return err;
	}

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped. But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT)) {
		if (security_vm_enough_memory_mm(mm, new_len >> PAGE_SHIFT))
			return -ENOMEM;
	}

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma) {
		if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT))
			vm_unacct_memory(new_len >> PAGE_SHIFT);
		return -ENOMEM;
	}

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell pfnmap tracking that the mapping has moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_moved(vma);

	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
		vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

		/*
		 * anon_vma links of the old vma are no longer needed after its
		 * page table has been moved.
		 */
		if (new_vma != vma && vma->vm_start == old_addr &&
			vma->vm_end == (old_addr + old_len))
			unlink_anon_vmas(vma);

		/* Because we won't unmap we don't need to touch locked_vm */
		return new_addr;
	}

	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
			vm_acct_memory(new_len >> PAGE_SHIFT);
		excess = 0;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	return new_addr;
}

static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags,
	unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long pgoff;

	vma = vma_lookup(mm, addr);
	if (!vma)
		return ERR_PTR(-EFAULT);

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping.  This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original.  This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original.  There are no known use cases for this
	 * behavior.  As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap.  This is not supported.\n", current->comm, current->pid);
		return ERR_PTR(-EINVAL);
	}

	if ((flags & MREMAP_DONTUNMAP) &&
			(vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
		return ERR_PTR(-EINVAL);

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, vma->vm_flags,
				(new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}

static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		unsigned long flags, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags = 0;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	/*
	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmapped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the vmas after it gets -ENOMEM.
	 * So, to avoid such a scenario we can pre-compute if the whole
	 * operation has a high chance of succeeding map-wise.
	 * The worst-case scenario is when both vmas (new_addr and old_addr)
	 * get split in 3 before unmapping them.
	 * That means 2 more maps (1 for each) to the ones we already hold.
	 * Check whether the current map count plus 2 still leaves us 4 maps
	 * below the threshold, otherwise return -ENOMEM here to be safe.
	 */
	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (flags & MREMAP_FIXED) {
		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
		if (ret)
			goto out;
	}

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (flags & MREMAP_DONTUNMAP &&
		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;

	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (IS_ERR_VALUE(ret))
		goto out1;

	/* We got a new mapping */
	if (!(flags & MREMAP_FIXED))
		new_addr = ret;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
		       uf_unmap);

	if (!(offset_in_page(ret)))
		goto out;

out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;
	bool downgraded = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arm64/tagged-address-abi.rst for more information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	/*
	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
	 * in the process.
	 */
	if (flags & MREMAP_DONTUNMAP &&
			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, flags, &uf, &uf_unmap_early,
				&uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * __do_munmap does all the needed commit accounting, and
	 * downgrades mmap_lock to read if so directed.
	 */
	if (old_len >= new_len) {
		int retval;

		retval = __do_munmap(mm, addr+new_len, old_len - new_len,
				     &uf_unmap, true);
		if (retval < 0 && old_len != new_len) {
			ret = retval;
			goto out;
		/* Returning 1 indicates mmap_lock is downgraded to read. */
		} else if (retval == 1)
			downgraded = true;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area.. */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (IS_ERR_VALUE(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret)) {
		vm_unacct_memory(charged);
		locked = false;
	}
	if (downgraded)
		mmap_read_unlock(current->mm);
	else
		mmap_write_unlock(current->mm);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}
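
/*
 * Illustrative userspace sketch (not part of this file): the common
 * MREMAP_MAYMOVE path handled by the syscall above, i.e. growing an
 * anonymous mapping and letting the kernel relocate it if it cannot be
 * expanded in place.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, 4096, 8 * 4096, MREMAP_MAYMOVE);
 *	if (q == MAP_FAILED)
 *		perror("mremap");
 */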