// SPDX-License-Identifier: GPL-2.0
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/mm-arch-hooks.h>
#include <linux/userfaultfd_k.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}
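/*
 * For illustration only (an assumption about typical userspace usage,
 * not part of this file): with CONFIG_MEM_SOFT_DIRTY, the bits set by
 * move_soft_dirty_pte() above are observable through the soft-dirty
 * interface, roughly:
 *
 *	write(clear_refs_fd, "4", 1);	// /proc/<pid>/clear_refs:
 *					// clear soft-dirty for the task
 *	// ... task calls mremap() ...
 *	// read /proc/<pid>/pagemap: bit 55 of an entry is the
 *	// soft-dirty flag, so a moved pte shows up as soft-dirty.
 *
 * See Documentation/admin-guide/mm/soft-dirty.rst.
 */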
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with page_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (need_rmap_locks)
		drop_rmap_locks(vma);
}
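/*
 * Worked example (illustrative, derived from the code above): if
 * move_ptes() is entered with old_addr == 0x1000 and old_end == 0x4000,
 * then len == 0x3000.  The loop advances old_addr up to old_end, so the
 * final flush_tlb_range() covers old_end - len == 0x1000 up to 0x4000,
 * i.e. exactly the range the ptes were moved out of, even though
 * old_addr itself no longer points at the start of that range.
 */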
#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		unsigned long new_addr, unsigned long old_end,
		pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmd;

	if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK)
	    || old_end - old_addr < PMD_SIZE)
		return false;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_sem prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pmd */
	pmd = *old_pmd;
	pmd_clear(old_pmd);

	VM_BUG_ON(!pmd_none(*new_pmd));

	/* Set the new pmd */
	set_pmd_at(mm, new_addr, new_pmd, pmd);
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#endif

unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, next, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE) {
				bool moved;
				/* See comment in move_ptes() */
				if (need_rmap_locks)
					take_rmap_locks(vma);
				moved = move_huge_pmd(vma, old_addr, new_addr,
						      old_end, old_pmd, new_pmd);
				if (need_rmap_locks)
					drop_rmap_locks(vma);
				if (moved)
					continue;
			}
			split_huge_pmd(vma, old_pmd, old_addr);
			if (pmd_trans_unstable(old_pmd))
				continue;
		} else if (extent == PMD_SIZE) {
#ifdef CONFIG_HAVE_MOVE_PMD
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			bool moved;

			if (need_rmap_locks)
				take_rmap_locks(vma);
			moved = move_normal_pmd(vma, old_addr, new_addr,
					old_end, old_pmd, new_pmd);
			if (need_rmap_locks)
				drop_rmap_locks(vma);
			if (moved)
				continue;
#endif
		}

		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
			  new_pmd, new_addr, need_rmap_locks);
	}

	mmu_notifier_invalidate_range_end(&range);

	return len + old_addr - old_end;	/* how much done */
}
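/*
 * Worked example for the return value above (illustrative): with
 * len == 3 * PMD_SIZE, suppose alloc_new_pmd() fails after two full
 * iterations.  The loop breaks with old_addr == old_end - PMD_SIZE, so
 * len + old_addr - old_end == 2 * PMD_SIZE: exactly the number of
 * bytes whose page table entries were moved before the failure, which
 * is what move_vma() compares against old_len to detect a partial move.
 */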
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap(),
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped. But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
			  MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since the page tables are still there,
		 * and then proceed to unmap the new area instead of the old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
		arch_remap(mm, old_addr, old_addr + old_len,
			   new_addr, new_addr + new_len);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell the PFN tracking code that the pfnmap has moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_moved(vma);

	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
		if (vm_flags & VM_ACCOUNT) {
			/* Always put back VM_ACCOUNT since we won't unmap */
			vma->vm_flags |= VM_ACCOUNT;

			vm_acct_memory(vma_pages(new_vma));
		}

		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
		vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

		/* Because we won't unmap we don't need to touch locked_vm */
		goto out;
	}

	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}
out:
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	return new_addr;
}
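/*
 * Worked example for the excess/split accounting above (illustrative):
 * moving old_len == 2 pages out of the middle of a 6-page VM_ACCOUNT
 * vma leaves a piece before and a piece after the hole, so excess is
 * 4 pages worth of bytes and split == 1.  do_munmap() then cuts the
 * vma in two, and at "out" both leftover pieces (vma and vma->vm_next)
 * get VM_ACCOUNT restored so their reservations survive.
 */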
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags,
	unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);
	unsigned long pgoff;

	if (!vma || vma->vm_start > addr)
		return ERR_PTR(-EFAULT);

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping. This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original. This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original. There are no known use cases for this
	 * behavior. As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n",
			     current->comm, current->pid);
		return ERR_PTR(-EINVAL);
	}

	if (flags & MREMAP_DONTUNMAP && (!vma_is_anonymous(vma) ||
			vma->vm_flags & VM_SHARED))
		return ERR_PTR(-EINVAL);

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, vma->vm_flags,
				(new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}
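/*
 * Worked example for the VM_LOCKED check above (illustrative): with
 * RLIMIT_MEMLOCK at 64KiB, 15 pages (60KiB, 4KiB pages) already in
 * mm->locked_vm, and a caller growing a locked vma by 8KiB, locked
 * becomes 68KiB, which exceeds lock_limit, so an unprivileged caller
 * (no CAP_IPC_LOCK) gets -EAGAIN before any page tables are touched.
 */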
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		unsigned long flags, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags = 0;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	/*
	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmapped the regions here
	 * (new_addr and old_addr), because userspace will not know the
	 * state of the vmas after it gets -ENOMEM.
	 * So, to avoid such a scenario, we pre-compute whether the whole
	 * operation has a high chance of succeeding map-wise.
	 * The worst case is when both vmas (at new_addr and old_addr) get
	 * split in three before being unmapped.
	 * That means 2 more maps (1 for each vma) on top of the ones we
	 * already hold.
	 * Check whether the current map count plus 2 still leaves us 4 maps
	 * below the threshold, otherwise return -ENOMEM here to be safe.
	 */
	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (flags & MREMAP_FIXED) {
		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
		if (ret)
			goto out;
	}

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (flags & MREMAP_DONTUNMAP &&
		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;

	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (IS_ERR_VALUE(ret))
		goto out1;

	/* We got a new mapping */
	if (!(flags & MREMAP_FIXED))
		new_addr = ret;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
		       uf_unmap);

	if (!(offset_in_page(ret)))
		goto out;

out1:
	vm_unacct_memory(charged);

out:
	return ret;
}
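/*
 * Userspace view of the MREMAP_FIXED path above (illustrative, an
 * assumption about a typical caller, not kernel-internal code):
 *
 *	void *new = mremap(old, old_len, new_len,
 *			   MREMAP_MAYMOVE | MREMAP_FIXED, fixed_addr);
 *
 * Any existing mapping at fixed_addr is unmapped first (the early
 * do_munmap() of new_addr above), mirroring what MAP_FIXED does
 * for mmap().
 */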
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}
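/*
 * Userspace view of MREMAP_DONTUNMAP (illustrative, an assumption
 * about a typical caller): for a private anonymous mapping,
 *
 *	void *new = mremap(old, len, len,
 *			   MREMAP_MAYMOVE | MREMAP_DONTUNMAP, hint);
 *
 * moves the pages to the new range while leaving the old range mapped
 * but empty (accesses there fault in fresh pages), a behavior aimed at
 * userfaultfd-based schemes that want to catch accesses to the old
 * range after the move.
 */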
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;
	bool downgraded = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arm64/tagged-address-abi.rst for more information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	/*
	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
	 * in the process.
	 */
	if (flags & MREMAP_DONTUNMAP &&
			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, flags, &uf, &uf_unmap_early,
				&uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * __do_munmap does all the needed commit accounting, and
	 * downgrades mmap_sem to read if so directed.
	 */
	if (old_len >= new_len) {
		int retval;

		retval = __do_munmap(mm, addr+new_len, old_len - new_len,
				     &uf_unmap, true);
		if (retval < 0 && old_len != new_len) {
			ret = retval;
			goto out;
		/* Returning 1 indicates mmap_sem is downgraded to read. */
		} else if (retval == 1)
			downgraded = true;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area.. */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (IS_ERR_VALUE(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret)) {
		vm_unacct_memory(charged);
		locked = false;
	}
	if (downgraded)
		up_read(&current->mm->mmap_sem);
	else
		up_write(&current->mm->mmap_sem);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, new_addr, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}
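/*
 * End-to-end illustration (an assumption about typical userspace usage,
 * not part of this file): a minimal program exercising the grow path of
 * the syscall above.  Build as ordinary userspace C; _GNU_SOURCE is
 * needed for the mremap() prototype.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		size_t old_len = 4096, new_len = 8192;
 *		void *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		// Either expands in place (vma_expandable() succeeds)
 *		// or moves the mapping (MREMAP_MAYMOVE).
 *		void *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
 *		if (q == MAP_FAILED)
 *			return 1;
 *		printf("moved: %d\n", q != p);
 *		return munmap(q, new_len);
 *	}
 */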