/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	split_huge_page_pmd(mm, pmd);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;

	return pmd;
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));
	if (pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, addr))
		return NULL;

	return pmd;
}
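/*
 * move_ptes() transfers the pte entries covering [old_addr, old_end)
 * under old_pmd to the corresponding slots under new_pmd, all within
 * the one mm.  The caller has already ensured that both pmds exist.
 */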
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr)
{
	struct address_space *mapping = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;

	if (vma->vm_file) {
		/*
		 * Subtle point from Rajesh Venkatasubramanian: before
		 * moving file-based ptes, we must lock truncate_pagecache
		 * out, since it might clean the dst vma before the src vma,
		 * and we propagate stale pages into the dst afterward.
		 */
		mapping = vma->vm_file->f_mapping;
		mutex_lock(&mapping->i_mmap_mutex);
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (mapping)
		mutex_unlock(&mapping->i_mmap_mutex);
}

#define LATENCY_LIMIT	(64 * PAGE_SIZE)
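/*
 * Walk the range pmd by pmd, moving at most LATENCY_LIMIT bytes of ptes
 * per iteration with a cond_resched() in between, then flush the TLB
 * once over everything that was actually moved.  Returns how much of
 * the range was handled, which is less than len only when allocating
 * a new pmd fails.
 */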
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;
	bool need_flush = false;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmu_notifier_invalidate_range_start(vma->vm_mm, old_addr, old_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
				new_vma, new_pmd, new_addr);
		need_flush = true;
	}
	if (likely(need_flush))
		flush_tlb_range(vma, old_end-len, old_addr);

	mmu_notifier_invalidate_range_end(vma->vm_mm, old_end-len, old_end);

	return len + old_addr - old_end;	/* how much done */
}
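/*
 * Move the vma at old_addr to new_addr: set up the destination vma,
 * carry the page tables across, then unmap the old range, taking care
 * that the VM_ACCOUNT reservation survives the intermediate do_munmap().
 */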
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped. But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	mm->total_vm += new_len >> PAGE_SHIFT;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		if (new_len > old_len)
			mlock_vma_pages_range(new_vma, new_addr + old_len,
						       new_addr + new_len);
	}

	return new_addr;
}
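/*
 * Check that [addr, addr+old_len) sits inside a single ordinary vma and
 * that the vma may legitimately grow to new_len.  On success any
 * VM_ACCOUNT charge has been taken and is reported back through *p, so
 * the caller can unaccount it if a later step fails.
 */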
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (!vma || vma->vm_start > addr)
		goto Efault;

	if (is_vm_hugetlb_page(vma))
		goto Einval;

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto Efault;

	/* Need to be careful about a growing mapping */
	if (new_len > old_len) {
		unsigned long pgoff;

		if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
			goto Efault;
		pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
		pgoff += vma->vm_pgoff;
		if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
			goto Einval;
	}

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			goto Eagain;
	}

	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
		goto Enomem;

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory(charged))
			goto Efault;
		*p = charged;
	}

	return vma;

Efault:	/* very odd choice for most of the cases, but... */
	return ERR_PTR(-EFAULT);
Einval:
	return ERR_PTR(-EINVAL);
Enomem:
	return ERR_PTR(-ENOMEM);
Eagain:
	return ERR_PTR(-EAGAIN);
}
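/*
 * Handle the MREMAP_FIXED case: the destination must be page aligned,
 * fit below TASK_SIZE and be disjoint from the source; whatever
 * currently lives at new_addr is unmapped before the vma is moved there.
 */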
static unsigned long mremap_to(unsigned long addr,
	unsigned long old_len, unsigned long new_addr,
	unsigned long new_len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (new_addr & ~PAGE_MASK)
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Check if the location we're moving into overlaps the
	 * old location at all, and fail if it does.
	 */
	if ((new_addr <= addr) && (new_addr+new_len) > addr)
		goto out;

	if ((addr <= new_addr) && (addr+old_len) > new_addr)
		goto out;

	ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
	if (ret)
		goto out;

	ret = do_munmap(mm, new_addr, new_len);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (ret & ~PAGE_MASK)
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr);
	if (!(ret & ~PAGE_MASK))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}
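/*
 * For reference, an illustrative userspace sequence (not part of this
 * file) that ends up in do_mremap() below:
 *
 *	void *p = mmap(NULL, 4096, PROT_READ|PROT_WRITE,
 *		       MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
 *
 * The kernel first tries to grow the mapping in place, and only moves
 * it elsewhere when MREMAP_MAYMOVE permits.
 */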
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		goto out;

	if (addr & ~PAGE_MASK)
		goto out;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		goto out;

	if (flags & MREMAP_FIXED) {
		if (flags & MREMAP_MAYMOVE)
			ret = mremap_to(addr, old_len, new_addr, new_len);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			mm->total_vm += pages;
			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				mlock_vma_pages_range(vma, addr + old_len,
						   addr + new_len);
			}
			ret = addr;
			goto out;
		}
	}
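	/*
	 * We weren't able to just expand or shrink the area: fall through
	 * to creating a new mapping and moving into it, below.
	 */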
	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (new_addr & ~PAGE_MASK) {
			ret = new_addr;
			goto out;
		}

		ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
		if (ret)
			goto out;
		ret = move_vma(vma, addr, old_len, new_len, new_addr);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
	return ret;
}

/*
 * The mremap(2) entry point: do_mremap() and everything under it rely
 * on mmap_sem being held exclusively, so take it for writing here.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}