// SPDX-License-Identifier: GPL-2.0
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>

#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>

#include "internal.h"

/* Classify the kind of remap operation being performed. */
enum mremap_type {
	MREMAP_INVALID,		/* Initial state. */
	MREMAP_NO_RESIZE,	/* old_len == new_len, if not moved, do nothing. */
	MREMAP_SHRINK,		/* old_len > new_len. */
	MREMAP_EXPAND,		/* old_len < new_len. */
};

/*
 * Describes a VMA mremap() operation and is threaded throughout it.
 *
 * Any of the fields may be mutated by the operation; however, these values
 * will always accurately reflect the remap (for instance, we may adjust
 * lengths and delta to account for hugetlb alignment).
 */
struct vma_remap_struct {
	/* User-provided state. */
	unsigned long addr;	/* User-specified address from which we remap. */
	unsigned long old_len;	/* Length of range being remapped. */
	unsigned long new_len;	/* Desired new length of mapping. */
	unsigned long flags;	/* User-specified MREMAP_* flags. */
	unsigned long new_addr;	/* Optionally, desired new address. */

	/* uffd state. */
	struct vm_userfaultfd_ctx *uf;
	struct list_head *uf_unmap_early;
	struct list_head *uf_unmap;

	/* VMA state, determined in do_mremap(). */
	struct vm_area_struct *vma;

	/* Internal state, determined in do_mremap(). */
	unsigned long delta;		/* Absolute delta of old_len, new_len. */
	bool mlocked;			/* Was the VMA mlock()'d? */
	enum mremap_type remap_type;	/* expand, shrink, etc. */
	bool mmap_locked;		/* Is mm currently write-locked? */
	unsigned long charged;		/* If VM_ACCOUNT, # pages to account. */
};
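
/*
 * Page table walking helpers (descriptive note). The get_old_*() helpers only
 * walk the existing page tables on the source side and return NULL if a level
 * is missing, while the alloc_new_*() helpers allocate any missing
 * intermediate levels on the destination side.
 */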

static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	return pud;
}

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = get_old_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

static pud_t *alloc_new_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;

	return pud_alloc(mm, p4d, addr);
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = alloc_new_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}

static int move_ptes(struct pagetable_move_control *pmc,
		unsigned long extent, pmd_t *old_pmd, pmd_t *new_pmd)
{
	struct vm_area_struct *vma = pmc->old;
	bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	pmd_t dummy_pmdval;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long old_addr = pmc->old_addr;
	unsigned long new_addr = pmc->new_addr;
	unsigned long old_end = old_addr + extent;
	unsigned long len = old_end - old_addr;
	int err = 0;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (pmc->need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	if (!old_pte) {
		err = -EAGAIN;
		goto out;
	}
	/*
	 * Now new_pte is none, so the hpage_collapse_scan_file() path cannot
	 * find this by traversing file->f_mapping, meaning there is no
	 * concurrency with retract_page_tables(). In addition, we already hold
	 * the exclusive mmap_lock, so this new_pte page is stable; there is no
	 * need to get pmdval and do a pmd_same() check.
	 */
	new_pte = pte_offset_map_rw_nolock(mm, new_pmd, new_addr, &dummy_pmdval,
					   &new_ptl);
	if (!new_pte) {
		pte_unmap_unlock(old_pte, old_ptl);
		err = -EAGAIN;
		goto out;
	}
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(ptep_get(old_pte)))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with folio_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);

		if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
			pte_clear(mm, new_addr, new_pte);
		else {
			if (need_clear_uffd_wp) {
				if (pte_present(pte))
					pte = pte_clear_uffd_wp(pte);
				else if (is_swap_pte(pte))
					pte = pte_swp_clear_uffd_wp(pte);
			}
			set_pte_at(mm, new_addr, new_pte, pte);
		}
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
out:
	if (pmc->need_rmap_locks)
		drop_rmap_locks(vma);
	return err;
}

#ifndef arch_supports_page_table_move
#define arch_supports_page_table_move arch_supports_page_table_move
static inline bool arch_supports_page_table_move(void)
{
	return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
		IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
}
#endif

#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct pagetable_move_control *pmc,
		pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct vm_area_struct *vma = pmc->old;
	struct mm_struct *mm = vma->vm_mm;
	bool res = false;
	pmd_t pmd;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd.
	 * But if we first have a few 4kB-only pages that get moved down, and
	 * then hit the "now the rest is PMD-aligned, let's do everything one
	 * pmd at a time", we will still have the old (now empty of any 4kB
	 * pages, but still there) PMD in the page table tree.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't move
	 * this pmd".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/* If this pmd belongs to a uffd vma with remap events disabled, we need
	 * to ensure that the uffd-wp state is cleared from all pgtables. This
	 * means recursing into lower page tables in move_page_tables(), and we
	 * can reuse the existing code if we simply treat the entry as "not
	 * moved".
	 */
	if (vma_has_uffd_without_event_remap(vma))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	pmd = *old_pmd;

	/* Racing with collapse? */
	if (unlikely(!pmd_present(pmd) || pmd_leaf(pmd)))
		goto out_unlock;
	/* Clear the pmd */
	pmd_clear(old_pmd);
	res = true;

	VM_BUG_ON(!pmd_none(*new_pmd));

	pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
	flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PMD_SIZE);
out_unlock:
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return res;
}
#else
static inline bool move_normal_pmd(struct pagetable_move_control *pmc,
		pmd_t *old_pmd, pmd_t *new_pmd)
{
	return false;
}
#endif

#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
static bool move_normal_pud(struct pagetable_move_control *pmc,
		pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct vm_area_struct *vma = pmc->old;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/* If this pud belongs to a uffd vma with remap events disabled, we need
	 * to ensure that the uffd-wp state is cleared from all pgtables. This
	 * means recursing into lower page tables in move_page_tables(), and we
	 * can reuse the existing code if we simply treat the entry as "not
	 * moved".
	 */
	if (vma_has_uffd_without_event_remap(vma))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	pud_populate(mm, new_pud, pud_pgtable(pud));
	flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pud(struct pagetable_move_control *pmc,
		pud_t *old_pud, pud_t *new_pud)
{
	return false;
}
#endif

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static bool move_huge_pud(struct pagetable_move_control *pmc,
		pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct vm_area_struct *vma = pmc->old;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	/* Set the new pud */
	/* mark soft_dirty when we add pud level soft dirty support */
	set_pud_at(mm, pmc->new_addr, new_pud, pud);
	flush_pud_tlb_range(vma, pmc->old_addr, pmc->old_addr + HPAGE_PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static bool move_huge_pud(struct pagetable_move_control *pmc,
		pud_t *old_pud, pud_t *new_pud)
{
	WARN_ON_ONCE(1);
	return false;
}
#endif

enum pgt_entry {
	NORMAL_PMD,
	HPAGE_PMD,
	NORMAL_PUD,
	HPAGE_PUD,
};

/*
 * Returns an extent of the corresponding size for the pgt_entry specified if
 * valid. Else returns a smaller extent bounded by the end of the source and
 * destination pgt_entry.
 */
static __always_inline unsigned long get_extent(enum pgt_entry entry,
		struct pagetable_move_control *pmc)
{
	unsigned long next, extent, mask, size;
	unsigned long old_addr = pmc->old_addr;
	unsigned long old_end = pmc->old_end;
	unsigned long new_addr = pmc->new_addr;

	switch (entry) {
	case HPAGE_PMD:
	case NORMAL_PMD:
		mask = PMD_MASK;
		size = PMD_SIZE;
		break;
	case HPAGE_PUD:
	case NORMAL_PUD:
		mask = PUD_MASK;
		size = PUD_SIZE;
		break;
	default:
		BUILD_BUG();
		break;
	}

	next = (old_addr + size) & mask;
	/* even if next overflowed, extent below will be ok */
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}
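
/*
 * Worked example (illustrative only, assuming 2 MiB PMDs, i.e.
 * PMD_SIZE == 0x200000):
 *
 *   old_addr == 0x2ff000, old_end == 0x700000, new_addr == 0x5fe000
 *
 *   Source side:      next == 0x400000, so extent == 0x101000
 *   Destination side: next == 0x600000, so extent is clamped to 0x2000
 *
 * That is, the extent is limited by whichever of the source or destination
 * addresses reaches the next page table boundary first (here the destination).
 */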

/*
 * Should move_pgt_entry() acquire the rmap locks? This is either expressed in
 * the PMC, or overridden in the case of normal, larger page tables.
 */
static bool should_take_rmap_locks(struct pagetable_move_control *pmc,
				   enum pgt_entry entry)
{
	switch (entry) {
	case NORMAL_PMD:
	case NORMAL_PUD:
		return true;
	default:
		return pmc->need_rmap_locks;
	}
}

/*
 * Attempts to speedup the move by moving entry at the level corresponding to
 * pgt_entry. Returns true if the move was successful, else false.
 */
static bool move_pgt_entry(struct pagetable_move_control *pmc,
			   enum pgt_entry entry, void *old_entry, void *new_entry)
{
	bool moved = false;
	bool need_rmap_locks = should_take_rmap_locks(pmc, entry);

	/* See comment in move_ptes() */
	if (need_rmap_locks)
		take_rmap_locks(pmc->old);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(pmc, old_entry, new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(pmc, old_entry, new_entry);
		break;
	case HPAGE_PMD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pmd(pmc->old, pmc->old_addr, pmc->new_addr, old_entry,
				      new_entry);
		break;
	case HPAGE_PUD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pud(pmc, old_entry, new_entry);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(pmc->old);

	return moved;
}

/*
 * A helper to check if aligning down is OK. The aligned address should fall
 * on *no mapping*. For the stack moving down, that's a special move within
 * the VMA that is created to span the source and destination of the move,
 * so we make an exception for it.
 */
static bool can_align_down(struct pagetable_move_control *pmc,
			   struct vm_area_struct *vma, unsigned long addr_to_align,
			   unsigned long mask)
{
	unsigned long addr_masked = addr_to_align & mask;

	/*
	 * If @addr_to_align of either source or destination is not the beginning
	 * of the corresponding VMA, we can't align down or we will destroy part
	 * of the current mapping.
	 */
	if (!pmc->for_stack && vma->vm_start != addr_to_align)
		return false;

	/* In the stack case we explicitly permit in-VMA alignment. */
	if (pmc->for_stack && addr_masked >= vma->vm_start)
		return true;

	/*
	 * Make sure the realignment doesn't cause the address to fall on an
	 * existing mapping.
	 */
	return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL;
}

/*
 * Determine if we are in fact able to realign for efficiency to a higher page
 * table boundary.
 */
static bool can_realign_addr(struct pagetable_move_control *pmc,
			     unsigned long pagetable_mask)
{
	unsigned long align_mask = ~pagetable_mask;
	unsigned long old_align = pmc->old_addr & align_mask;
	unsigned long new_align = pmc->new_addr & align_mask;
	unsigned long pagetable_size = align_mask + 1;
	unsigned long old_align_next = pagetable_size - old_align;

	/*
	 * We don't want to have to go hunting for VMAs from the end of the old
	 * VMA to the next page table boundary, also we want to make sure the
	 * operation is worthwhile.
	 *
	 * So ensure that we only perform this realignment if the end of the
	 * range being copied reaches or crosses the page table boundary.
	 *
	 * boundary                        boundary
	 *  .<- old_align ->                .
	 *  .              |----------------.-----------|
	 *  .              |          vma   .           |
	 *  .              |----------------.-----------|
	 *  .              <----------------.----------->
	 *  .                          len_in
	 *  <------------------------------->
	 *  .          pagetable_size       .
	 *  .              <---------------->
	 *  .              old_align_next   .
	 */
	if (pmc->len_in < old_align_next)
		return false;

	/* Skip if the addresses are already aligned. */
	if (old_align == 0)
		return false;

	/* Only realign if the new and old addresses are mutually aligned. */
	if (old_align != new_align)
		return false;

	/* Ensure realignment doesn't cause overlap with existing mappings. */
	if (!can_align_down(pmc, pmc->old, pmc->old_addr, pagetable_mask) ||
	    !can_align_down(pmc, pmc->new, pmc->new_addr, pagetable_mask))
		return false;

	return true;
}

/*
 * Opportunistically realign to specified boundary for faster copy.
 *
 * Consider an mremap() of a VMA with page table boundaries as below, and no
 * preceding VMAs from the lower page table boundary to the start of the VMA,
 * with the end of the range reaching or crossing the page table boundary.
 *
 *   boundary                        boundary
 *   .       |----------------.-----------|
 *   .       |          vma   .           |
 *   .       |----------------.-----------|
 *   .   pmc->old_addr        .      pmc->old_end
 *   .       <---------------------------->
 *   .          move these page tables
 *
 * If we proceed with moving page tables in this scenario, we will have a lot of
 * work to do traversing old page tables and establishing new ones in the
 * destination across multiple lower level page tables.
 *
 * The idea here is simply to align pmc->old_addr, pmc->new_addr down to the
 * page table boundary, so we can simply copy a single page table entry for the
 * aligned portion of the VMA instead:
 *
 *   boundary                        boundary
 *   .       |----------------.-----------|
 *   .       |          vma   .           |
 *   .       |----------------.-----------|
 * pmc->old_addr              .      pmc->old_end
 * <------------------------------------------->
 * .            move these page tables         .
 */
static void try_realign_addr(struct pagetable_move_control *pmc,
			     unsigned long pagetable_mask)
{
	if (!can_realign_addr(pmc, pagetable_mask))
		return;

	/*
	 * Simply align to page table boundaries. Note that we do NOT update the
	 * pmc->old_end value, and since the move_page_tables() operation spans
	 * from [old_addr, old_end) (offsetting new_addr as it is performed),
	 * this simply changes the start of the copy, not the end.
	 */
	pmc->old_addr &= pagetable_mask;
	pmc->new_addr &= pagetable_mask;
}

/* Is the page table move operation done? */
static bool pmc_done(struct pagetable_move_control *pmc)
{
	return pmc->old_addr >= pmc->old_end;
}

/* Advance to the next page table, offset by extent bytes. */
static void pmc_next(struct pagetable_move_control *pmc, unsigned long extent)
{
	pmc->old_addr += extent;
	pmc->new_addr += extent;
}

/*
 * Determine how many bytes in the specified input range have had their page
 * tables moved so far.
 */
static unsigned long pmc_progress(struct pagetable_move_control *pmc)
{
	unsigned long orig_old_addr = pmc->old_end - pmc->len_in;
	unsigned long old_addr = pmc->old_addr;

	/*
	 * Prevent negative return values when {old,new}_addr was realigned but
	 * we broke out of the loop in move_page_tables() for the first PMD
	 * itself.
	 */
	return old_addr < orig_old_addr ? 0 : old_addr - orig_old_addr;
}
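
/*
 * Summary of the strategy used below: for each chunk of the input range the
 * fast paths are tried first - moving a whole huge or normal PUD entry, then
 * a whole huge or normal PMD entry - and only if none of those apply do we
 * fall back to moving individual PTEs via move_ptes(). If a huge PMD cannot
 * be moved as a whole it is split and its PTEs are moved individually. The
 * return value is the number of bytes of the input range whose page tables
 * were actually moved.
 */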

unsigned long move_page_tables(struct pagetable_move_control *pmc)
{
	unsigned long extent;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;
	pud_t *old_pud, *new_pud;
	struct mm_struct *mm = pmc->old->vm_mm;

	if (!pmc->len_in)
		return 0;

	if (is_vm_hugetlb_page(pmc->old))
		return move_hugetlb_page_tables(pmc->old, pmc->new, pmc->old_addr,
						pmc->new_addr, pmc->len_in);

	/*
	 * If possible, realign addresses to PMD boundary for faster copy.
	 * Only realign if the mremap copying hits a PMD boundary.
	 */
	try_realign_addr(pmc, PMD_MASK);

	flush_cache_range(pmc->old, pmc->old_addr, pmc->old_end);
	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, mm,
				pmc->old_addr, pmc->old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; !pmc_done(pmc); pmc_next(pmc, extent)) {
		cond_resched();
		/*
		 * If extent is PUD-sized try to speed up the move by moving at the
		 * PUD level if possible.
		 */
		extent = get_extent(NORMAL_PUD, pmc);

		old_pud = get_old_pud(mm, pmc->old_addr);
		if (!old_pud)
			continue;
		new_pud = alloc_new_pud(mm, pmc->new_addr);
		if (!new_pud)
			break;
		if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
			if (extent == HPAGE_PUD_SIZE) {
				move_pgt_entry(pmc, HPAGE_PUD, old_pud, new_pud);
				/* We ignore and continue on error? */
				continue;
			}
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
			if (move_pgt_entry(pmc, NORMAL_PUD, old_pud, new_pud))
				continue;
		}

		extent = get_extent(NORMAL_PMD, pmc);
		old_pmd = get_old_pmd(mm, pmc->old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(mm, pmc->new_addr);
		if (!new_pmd)
			break;
again:
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
		    pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE &&
			    move_pgt_entry(pmc, HPAGE_PMD, old_pmd, new_pmd))
				continue;
			split_huge_pmd(pmc->old, old_pmd, pmc->old_addr);
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
			   extent == PMD_SIZE) {
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			if (move_pgt_entry(pmc, NORMAL_PMD, old_pmd, new_pmd))
				continue;
		}
		if (pmd_none(*old_pmd))
			continue;
		if (pte_alloc(pmc->new->vm_mm, new_pmd))
			break;
		if (move_ptes(pmc, extent, old_pmd, new_pmd) < 0)
			goto again;
	}

	mmu_notifier_invalidate_range_end(&range);

	return pmc_progress(pmc);
}

/* Set vrm->delta to the difference in VMA size specified by user. */
static void vrm_set_delta(struct vma_remap_struct *vrm)
{
	vrm->delta = abs_diff(vrm->old_len, vrm->new_len);
}

/* Determine what kind of remap this is - shrink, expand or no resize at all. */
static enum mremap_type vrm_remap_type(struct vma_remap_struct *vrm)
{
	if (vrm->delta == 0)
		return MREMAP_NO_RESIZE;

	if (vrm->old_len > vrm->new_len)
		return MREMAP_SHRINK;

	return MREMAP_EXPAND;
}

/*
 * When moving a VMA to vrm->new_addr, does this result in the new and old VMAs
 * overlapping?
 */
static bool vrm_overlaps(struct vma_remap_struct *vrm)
{
	unsigned long start_old = vrm->addr;
	unsigned long start_new = vrm->new_addr;
	unsigned long end_old = vrm->addr + vrm->old_len;
	unsigned long end_new = vrm->new_addr + vrm->new_len;

	/*
	 * start_old    end_old
	 *     |-----------|
	 *     |           |
	 *     |-----------|
	 *             |-------------|
	 *             |             |
	 *             |-------------|
	 *         start_new      end_new
	 */
	if (end_old > start_new && end_new > start_old)
		return true;

	return false;
}
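
/*
 * For example (illustrative only): moving [0x1000, 0x3000) to new_addr 0x2000
 * with new_len == 0x2000 overlaps, since end_old (0x3000) > start_new (0x2000)
 * and end_new (0x4000) > start_old (0x1000). Moving the same range to 0x3000
 * does not, as end_old is then not strictly above start_new.
 */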

/* Do the mremap() flags require that the new_addr parameter be specified? */
static bool vrm_implies_new_addr(struct vma_remap_struct *vrm)
{
	return vrm->flags & (MREMAP_FIXED | MREMAP_DONTUNMAP);
}

/*
 * Find an unmapped area for the requested vrm->new_addr.
 *
 * If MREMAP_FIXED then this is equivalent to a MAP_FIXED mmap() call. If only
 * MREMAP_DONTUNMAP is set, then this is equivalent to providing a hint to
 * mmap(), otherwise this is equivalent to mmap() specifying a NULL address.
 *
 * Returns 0 on success (with vrm->new_addr updated), or an error code upon
 * failure.
 */
static unsigned long vrm_set_new_addr(struct vma_remap_struct *vrm)
{
	struct vm_area_struct *vma = vrm->vma;
	unsigned long map_flags = 0;
	/* Page Offset _into_ the VMA. */
	pgoff_t internal_pgoff = (vrm->addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff_t pgoff = vma->vm_pgoff + internal_pgoff;
	unsigned long new_addr = vrm_implies_new_addr(vrm) ? vrm->new_addr : 0;
	unsigned long res;

	if (vrm->flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	res = get_unmapped_area(vma->vm_file, new_addr, vrm->new_len, pgoff,
				map_flags);
	if (IS_ERR_VALUE(res))
		return res;

	vrm->new_addr = res;
	return 0;
}

/*
 * Keep track of pages which have been added to the memory mapping. If the VMA
 * is accounted, also check to see if there is sufficient memory.
 *
 * Returns true on success, false if insufficient memory to charge.
 */
static bool vrm_charge(struct vma_remap_struct *vrm)
{
	unsigned long charged;

	if (!(vrm->vma->vm_flags & VM_ACCOUNT))
		return true;

	/*
	 * If we don't unmap the old mapping, then we account the entirety of
	 * the length of the new one. Otherwise it's just the delta in size.
	 */
	if (vrm->flags & MREMAP_DONTUNMAP)
		charged = vrm->new_len >> PAGE_SHIFT;
	else
		charged = vrm->delta >> PAGE_SHIFT;

	/* This accounts 'charged' pages of memory. */
	if (security_vm_enough_memory_mm(current->mm, charged))
		return false;

	vrm->charged = charged;
	return true;
}

/*
 * An error has occurred, so we will not be using vrm->charged memory.
 * Unaccount this memory if the VMA is accounted.
 */
static void vrm_uncharge(struct vma_remap_struct *vrm)
{
	if (!(vrm->vma->vm_flags & VM_ACCOUNT))
		return;

	vm_unacct_memory(vrm->charged);
	vrm->charged = 0;
}

/*
 * Update mm exec_vm, stack_vm, data_vm, and locked_vm fields as needed to
 * account for 'bytes' memory used, and if locked, indicate this in the VRM so
 * we can handle this correctly later.
 */
static void vrm_stat_account(struct vma_remap_struct *vrm,
			     unsigned long bytes)
{
	unsigned long pages = bytes >> PAGE_SHIFT;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = vrm->vma;

	vm_stat_account(mm, vma->vm_flags, pages);
	if (vma->vm_flags & VM_LOCKED) {
		mm->locked_vm += pages;
		vrm->mlocked = true;
	}
}

/*
 * Perform checks before attempting to write a VMA prior to it being
 * moved.
 */
static unsigned long prep_move_vma(struct vma_remap_struct *vrm)
{
	unsigned long err = 0;
	struct vm_area_struct *vma = vrm->vma;
	unsigned long old_addr = vrm->addr;
	unsigned long old_len = vrm->old_len;
	unsigned long dummy = vma->vm_flags;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (current->mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (vma->vm_ops && vma->vm_ops->may_split) {
		if (vma->vm_start != old_addr)
			err = vma->vm_ops->may_split(vma, old_addr);
		if (!err && vma->vm_end != old_addr + old_len)
			err = vma->vm_ops->may_split(vma, old_addr + old_len);
		if (err)
			return err;
	}

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped. But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
			  MADV_UNMERGEABLE, &dummy);
	if (err)
		return err;

	return 0;
}

/*
 * Unmap the source VMA for a VMA move, turning it from a copy to a move, being
 * careful to ensure we do not underflow memory accounting while doing so if it
 * is an accountable move.
 *
 * This is best effort; if we fail to unmap then we simply try to correct
 * accounting and exit.
 */
static void unmap_source_vma(struct vma_remap_struct *vrm)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = vrm->addr;
	unsigned long len = vrm->old_len;
	struct vm_area_struct *vma = vrm->vma;
	VMA_ITERATOR(vmi, mm, addr);
	int err;
	unsigned long vm_start;
	unsigned long vm_end;
	/*
	 * It might seem odd that we check for MREMAP_DONTUNMAP here, given this
	 * function implies that we unmap the original VMA, which seems
	 * contradictory.
	 *
	 * However, this occurs when this operation was attempted and an error
	 * arose, in which case we _do_ wish to unmap the _new_ VMA, which means
	 * we actually _do_ want it to be unaccounted.
	 */
	bool accountable_move = (vma->vm_flags & VM_ACCOUNT) &&
		!(vrm->flags & MREMAP_DONTUNMAP);

	/*
	 * So we perform a trick here to prevent incorrect accounting. Any merge
	 * or new VMA allocation performed in copy_vma() does not adjust
	 * accounting; it is expected that callers handle this.
	 *
	 * And indeed we already have: both cases are accounted for
	 * appropriately in vrm_charge().
	 *
	 * However, when we unmap the existing VMA (to effect the move), this
	 * code will, if the VMA has VM_ACCOUNT set, attempt to unaccount
	 * removed pages.
	 *
	 * To avoid this we temporarily clear this flag, reinstating it on any
	 * portions of the original VMA that remain.
	 */
	if (accountable_move) {
		vm_flags_clear(vma, VM_ACCOUNT);
		/* We are about to split vma, so store the start/end. */
		vm_start = vma->vm_start;
		vm_end = vma->vm_end;
	}

	err = do_vmi_munmap(&vmi, mm, addr, len, vrm->uf_unmap, /* unlock= */false);
	vrm->vma = NULL; /* Invalidated. */
	if (err) {
		/* OOM: unable to split vma, just get accounts right */
		vm_acct_memory(len >> PAGE_SHIFT);
		return;
	}

	/*
	 * If we mremap() from a VMA like this:
	 *
	 *    addr           end
	 *     |              |
	 *     v              v
	 *     |--------------|
	 *     |              |
	 *     |--------------|
	 *
	 * Having cleared VM_ACCOUNT from the whole VMA, after we unmap above
	 * we'll end up with:
	 *
	 *    addr           end
	 *     |              |
	 *     v              v
	 *     |---|      |---|
	 *     | A |      | B |
	 *     |---|      |---|
	 *
	 * The VMI is still pointing at addr, so vma_prev() will give us A, and
	 * a subsequent or lone vma_next() will give us B.
	 *
	 * do_vmi_munmap() will have restored the VMI back to addr.
	 */
	if (accountable_move) {
		unsigned long end = addr + len;

		if (vm_start < addr) {
			struct vm_area_struct *prev = vma_prev(&vmi);

			vm_flags_set(prev, VM_ACCOUNT); /* Acquires VMA lock. */
		}

		if (vm_end > end) {
			struct vm_area_struct *next = vma_next(&vmi);

			vm_flags_set(next, VM_ACCOUNT); /* Acquires VMA lock. */
		}
	}
}

/*
 * Copy vrm->vma over to vrm->new_addr, possibly adjusting size as part of the
 * process. Additionally handle an error occurring on moving of page tables,
 * where we reset vrm state to cause unmapping of the new VMA.
 *
 * Outputs the newly installed VMA to new_vma_ptr. Returns 0 on success or an
 * error code.
 */
static int copy_vma_and_data(struct vma_remap_struct *vrm,
			     struct vm_area_struct **new_vma_ptr)
{
	unsigned long internal_offset = vrm->addr - vrm->vma->vm_start;
	unsigned long internal_pgoff = internal_offset >> PAGE_SHIFT;
	unsigned long new_pgoff = vrm->vma->vm_pgoff + internal_pgoff;
	unsigned long moved_len;
	struct vm_area_struct *vma = vrm->vma;
	struct vm_area_struct *new_vma;
	int err = 0;
	PAGETABLE_MOVE(pmc, NULL, NULL, vrm->addr, vrm->new_addr, vrm->old_len);

	new_vma = copy_vma(&vma, vrm->new_addr, vrm->new_len, new_pgoff,
			   &pmc.need_rmap_locks);
	if (!new_vma) {
		vrm_uncharge(vrm);
		*new_vma_ptr = NULL;
		return -ENOMEM;
	}
	vrm->vma = vma;
	pmc.old = vma;
	pmc.new = new_vma;

	moved_len = move_page_tables(&pmc);
	if (moved_len < vrm->old_len)
		err = -ENOMEM;
	else if (vma->vm_ops && vma->vm_ops->mremap)
		err = vma->vm_ops->mremap(new_vma);

	if (unlikely(err)) {
		PAGETABLE_MOVE(pmc_revert, new_vma, vma, vrm->new_addr,
			       vrm->addr, moved_len);

		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
1180 */ 1181 pmc_revert.need_rmap_locks = true; 1182 move_page_tables(&pmc_revert); 1183 1184 vrm->vma = new_vma; 1185 vrm->old_len = vrm->new_len; 1186 vrm->addr = vrm->new_addr; 1187 } else { 1188 mremap_userfaultfd_prep(new_vma, vrm->uf); 1189 } 1190 1191 if (is_vm_hugetlb_page(vma)) 1192 clear_vma_resv_huge_pages(vma); 1193 1194 /* Tell pfnmap has moved from this vma */ 1195 if (unlikely(vma->vm_flags & VM_PFNMAP)) 1196 untrack_pfn_clear(vma); 1197 1198 *new_vma_ptr = new_vma; 1199 return err; 1200 } 1201 1202 /* 1203 * Perform final tasks for MADV_DONTUNMAP operation, clearing mlock() and 1204 * account flags on remaining VMA by convention (it cannot be mlock()'d any 1205 * longer, as pages in range are no longer mapped), and removing anon_vma_chain 1206 * links from it (if the entire VMA was copied over). 1207 */ 1208 static void dontunmap_complete(struct vma_remap_struct *vrm, 1209 struct vm_area_struct *new_vma) 1210 { 1211 unsigned long start = vrm->addr; 1212 unsigned long end = vrm->addr + vrm->old_len; 1213 unsigned long old_start = vrm->vma->vm_start; 1214 unsigned long old_end = vrm->vma->vm_end; 1215 1216 /* 1217 * We always clear VM_LOCKED[ONFAULT] | VM_ACCOUNT on the old 1218 * vma. 1219 */ 1220 vm_flags_clear(vrm->vma, VM_LOCKED_MASK | VM_ACCOUNT); 1221 1222 /* 1223 * anon_vma links of the old vma is no longer needed after its page 1224 * table has been moved. 1225 */ 1226 if (new_vma != vrm->vma && start == old_start && end == old_end) 1227 unlink_anon_vmas(vrm->vma); 1228 1229 /* Because we won't unmap we don't need to touch locked_vm. */ 1230 } 1231 1232 static unsigned long move_vma(struct vma_remap_struct *vrm) 1233 { 1234 struct mm_struct *mm = current->mm; 1235 struct vm_area_struct *new_vma; 1236 unsigned long hiwater_vm; 1237 int err; 1238 1239 err = prep_move_vma(vrm); 1240 if (err) 1241 return err; 1242 1243 /* If accounted, charge the number of bytes the operation will use. */ 1244 if (!vrm_charge(vrm)) 1245 return -ENOMEM; 1246 1247 /* We don't want racing faults. */ 1248 vma_start_write(vrm->vma); 1249 1250 /* Perform copy step. */ 1251 err = copy_vma_and_data(vrm, &new_vma); 1252 /* 1253 * If we established the copied-to VMA, we attempt to recover from the 1254 * error by setting the destination VMA to the source VMA and unmapping 1255 * it below. 1256 */ 1257 if (err && !new_vma) 1258 return err; 1259 1260 /* 1261 * If we failed to move page tables we still do total_vm increment 1262 * since do_munmap() will decrement it by old_len == new_len. 1263 * 1264 * Since total_vm is about to be raised artificially high for a 1265 * moment, we need to restore high watermark afterwards: if stats 1266 * are taken meanwhile, total_vm and hiwater_vm appear too high. 1267 * If this were a serious issue, we'd add a flag to do_munmap(). 1268 */ 1269 hiwater_vm = mm->hiwater_vm; 1270 1271 vrm_stat_account(vrm, vrm->new_len); 1272 if (unlikely(!err && (vrm->flags & MREMAP_DONTUNMAP))) 1273 dontunmap_complete(vrm, new_vma); 1274 else 1275 unmap_source_vma(vrm); 1276 1277 mm->hiwater_vm = hiwater_vm; 1278 1279 return err ? (unsigned long)err : vrm->new_addr; 1280 } 1281 1282 /* 1283 * resize_is_valid() - Ensure the vma can be resized to the new length at the give 1284 * address. 1285 * 1286 * Return 0 on success, error otherwise. 

static unsigned long move_vma(struct vma_remap_struct *vrm)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *new_vma;
	unsigned long hiwater_vm;
	int err;

	err = prep_move_vma(vrm);
	if (err)
		return err;

	/* If accounted, charge the number of bytes the operation will use. */
	if (!vrm_charge(vrm))
		return -ENOMEM;

	/* We don't want racing faults. */
	vma_start_write(vrm->vma);

	/* Perform copy step. */
	err = copy_vma_and_data(vrm, &new_vma);
	/*
	 * If we established the copied-to VMA, we attempt to recover from the
	 * error by setting the destination VMA to the source VMA and unmapping
	 * it below.
	 */
	if (err && !new_vma)
		return err;

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;

	vrm_stat_account(vrm, vrm->new_len);
	if (unlikely(!err && (vrm->flags & MREMAP_DONTUNMAP)))
		dontunmap_complete(vrm, new_vma);
	else
		unmap_source_vma(vrm);

	mm->hiwater_vm = hiwater_vm;

	return err ? (unsigned long)err : vrm->new_addr;
}

/*
 * resize_is_valid() - Ensure the vma can be resized to the new length at the
 * given address.
 *
 * Return 0 on success, error otherwise.
 */
static int resize_is_valid(struct vma_remap_struct *vrm)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = vrm->vma;
	unsigned long addr = vrm->addr;
	unsigned long old_len = vrm->old_len;
	unsigned long new_len = vrm->new_len;
	unsigned long pgoff;

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping. This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original. This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original. There are no known use cases for this
	 * behavior. As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n",
			     current->comm, current->pid);
		return -EINVAL;
	}

	if ((vrm->flags & MREMAP_DONTUNMAP) &&
	    (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
		return -EINVAL;

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return -EFAULT;

	if (new_len == old_len)
		return 0;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return -EINVAL;

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return -EFAULT;

	if (!mlock_future_ok(mm, vma->vm_flags, vrm->delta))
		return -EAGAIN;

	if (!may_expand_vm(mm, vma->vm_flags, vrm->delta >> PAGE_SHIFT))
		return -ENOMEM;

	return 0;
}

/*
 * The user has requested that the VMA be shrunk (i.e., old_len > new_len), so
 * perform the shrink, optionally dropping the mmap lock when we do so.
 *
 * In both cases this invalidates the VMA; however, if we don't drop the lock,
 * we load the correct VMA into vrm->vma afterwards.
 */
static unsigned long shrink_vma(struct vma_remap_struct *vrm,
				bool drop_lock)
{
	struct mm_struct *mm = current->mm;
	unsigned long unmap_start = vrm->addr + vrm->new_len;
	unsigned long unmap_bytes = vrm->delta;
	unsigned long res;
	VMA_ITERATOR(vmi, mm, unmap_start);

	VM_BUG_ON(vrm->remap_type != MREMAP_SHRINK);

	res = do_vmi_munmap(&vmi, mm, unmap_start, unmap_bytes,
			    vrm->uf_unmap, drop_lock);
	vrm->vma = NULL; /* Invalidated. */
	if (res)
		return res;

	/*
	 * If we've not dropped the lock, then we should reload the VMA to
	 * replace the invalidated VMA with the one that may have now been
	 * split.
	 */
	if (drop_lock) {
		vrm->mmap_locked = false;
	} else {
		vrm->vma = vma_lookup(mm, vrm->addr);
		if (!vrm->vma)
			return -EFAULT;
	}

	return 0;
}

/*
 * mremap_to() - remap a vma to a new location.
 * Returns: The new address of the vma or an error.
 */
static unsigned long mremap_to(struct vma_remap_struct *vrm)
{
	struct mm_struct *mm = current->mm;
	unsigned long err;

	/* Is the new length or address silly? */
	if (vrm->new_len > TASK_SIZE ||
	    vrm->new_addr > TASK_SIZE - vrm->new_len)
		return -EINVAL;

	if (vrm_overlaps(vrm))
		return -EINVAL;

	if (vrm->flags & MREMAP_FIXED) {
		/*
		 * In mremap_to() the VMA is moved to the destination address,
		 * so unmap the destination first. do_munmap() will check
		 * whether the destination is sealed.
		 */
		err = do_munmap(mm, vrm->new_addr, vrm->new_len,
				vrm->uf_unmap_early);
		vrm->vma = NULL; /* Invalidated. */
		if (err)
			return err;

		/*
		 * If we remap a portion of a VMA elsewhere in the same VMA,
		 * this can invalidate the old VMA. Reset.
		 */
		vrm->vma = vma_lookup(mm, vrm->addr);
		if (!vrm->vma)
			return -EFAULT;
	}

	if (vrm->remap_type == MREMAP_SHRINK) {
		err = shrink_vma(vrm, /* drop_lock= */false);
		if (err)
			return err;

		/* Set up for the move now the shrink has been executed. */
		vrm->old_len = vrm->new_len;
	}

	err = resize_is_valid(vrm);
	if (err)
		return err;

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (vrm->flags & MREMAP_DONTUNMAP) {
		vm_flags_t vm_flags = vrm->vma->vm_flags;
		unsigned long pages = vrm->old_len >> PAGE_SHIFT;

		if (!may_expand_vm(mm, vm_flags, pages))
			return -ENOMEM;
	}

	err = vrm_set_new_addr(vrm);
	if (err)
		return err;

	return move_vma(vrm);
}

static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;

	if (end < vma->vm_end) /* overflow */
		return 0;
	if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/* Determine whether we are actually able to execute an in-place expansion. */
static bool vrm_can_expand_in_place(struct vma_remap_struct *vrm)
{
	/* Number of bytes from vrm->addr to end of VMA. */
	unsigned long suffix_bytes = vrm->vma->vm_end - vrm->addr;

	/* If end of range aligns to end of VMA, we can just expand in-place. */
	if (suffix_bytes != vrm->old_len)
		return false;

	/* Check whether this is feasible. */
	if (!vma_expandable(vrm->vma, vrm->delta))
		return false;

	return true;
}

/*
 * Are the parameters passed to mremap() valid? If so return 0, otherwise return
 * error.
 */
static unsigned long check_mremap_params(struct vma_remap_struct *vrm)
{
	unsigned long addr = vrm->addr;
	unsigned long flags = vrm->flags;

	/* Ensure no unexpected flag values. */
	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return -EINVAL;

	/* Start address must be page-aligned. */
	if (offset_in_page(addr))
		return -EINVAL;

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!PAGE_ALIGN(vrm->new_len))
		return -EINVAL;

	/* The remainder of the checks are for cases with a specific new_addr. */
	if (!vrm_implies_new_addr(vrm))
		return 0;

	/* The new address must be page-aligned. */
	if (offset_in_page(vrm->new_addr))
		return -EINVAL;

	/* A fixed address implies a move. */
	if (!(flags & MREMAP_MAYMOVE))
		return -EINVAL;

	/* MREMAP_DONTUNMAP does not allow resizing in the process. */
	if (flags & MREMAP_DONTUNMAP && vrm->old_len != vrm->new_len)
		return -EINVAL;

	/*
	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmapped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the vmas after it gets -ENOMEM.
	 * So, to avoid such a scenario we can pre-compute whether the whole
	 * operation has a good chance of succeeding map-wise.
	 * The worst-case scenario is when both vmas (new_addr and old_addr)
	 * get split in 3 before unmapping them.
	 * That means 2 more maps (1 for each) added to the ones we already hold.
	 * Check whether the current map count plus 2 still leaves us 4 maps
	 * below the threshold, otherwise return -ENOMEM here to be safer.
	 */
	if ((current->mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	return 0;
}

/*
 * We know we can expand the VMA in-place by delta pages, so do so.
 *
 * If we discover the VMA is locked, update mm_struct statistics accordingly and
 * indicate so to the caller.
 */
static unsigned long expand_vma_in_place(struct vma_remap_struct *vrm)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = vrm->vma;
	VMA_ITERATOR(vmi, mm, vma->vm_end);

	if (!vrm_charge(vrm))
		return -ENOMEM;

	/*
	 * Function vma_merge_extend() is called on the extension we are adding
	 * to the already existing vma. It will merge this extension with the
	 * existing vma (the expand operation itself), and possibly also with
	 * the next vma if it becomes adjacent to the expanded vma and is
	 * otherwise compatible.
	 */
	vma = vma_merge_extend(&vmi, vma, vrm->delta);
	if (!vma) {
		vrm_uncharge(vrm);
		return -ENOMEM;
	}
	vrm->vma = vma;

	vrm_stat_account(vrm, vrm->delta);

	return 0;
}

static bool align_hugetlb(struct vma_remap_struct *vrm)
{
	struct hstate *h __maybe_unused = hstate_vma(vrm->vma);

	vrm->old_len = ALIGN(vrm->old_len, huge_page_size(h));
	vrm->new_len = ALIGN(vrm->new_len, huge_page_size(h));

	/* addrs must be huge page aligned */
	if (vrm->addr & ~huge_page_mask(h))
		return false;
	if (vrm->new_addr & ~huge_page_mask(h))
		return false;

	/*
	 * Don't allow remap expansion, because the underlying hugetlb
	 * reservation is not yet capable of handling split reservations.
	 */
	if (vrm->new_len > vrm->old_len)
		return false;

	vrm_set_delta(vrm);

	return true;
}

/*
 * We are mremap()'ing without specifying a fixed address to move to, but are
 * requesting that the VMA's size be increased.
 *
 * Try to do so in-place; if this fails, then move the VMA to a new location to
 * action the change.
 */
static unsigned long expand_vma(struct vma_remap_struct *vrm)
{
	unsigned long err;
	unsigned long addr = vrm->addr;

	err = resize_is_valid(vrm);
	if (err)
		return err;

	/*
	 * [addr, old_len) spans precisely to the end of the VMA, so try to
	 * expand it in-place.
	 */
	if (vrm_can_expand_in_place(vrm)) {
		err = expand_vma_in_place(vrm);
		if (err)
			return err;

		/*
		 * We want to populate the newly expanded portion of the VMA to
		 * satisfy the expectation that mlock()'ing a VMA maintains all
		 * of its pages in memory.
		 */
		if (vrm->mlocked)
			vrm->new_addr = addr;

		/* OK we're done! */
		return addr;
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it.
	 */

	/* We're not allowed to move the VMA, so error out. */
	if (!(vrm->flags & MREMAP_MAYMOVE))
		return -ENOMEM;

	/* Find a new location to move the VMA to. */
	err = vrm_set_new_addr(vrm);
	if (err)
		return err;

	return move_vma(vrm);
}

/*
 * Attempt to resize the VMA in-place; if we cannot, then move the VMA to the
 * first available address to perform the operation.
 */
static unsigned long mremap_at(struct vma_remap_struct *vrm)
{
	unsigned long res;

	switch (vrm->remap_type) {
	case MREMAP_INVALID:
		break;
	case MREMAP_NO_RESIZE:
		/* NO-OP CASE - resizing to the same size. */
		return vrm->addr;
	case MREMAP_SHRINK:
		/*
		 * SHRINK CASE. Can always be done in-place.
		 *
		 * Simply unmap the shrunken portion of the VMA. This does all
		 * the needed commit accounting, and we indicate that the mmap
		 * lock should be dropped.
		 */
		res = shrink_vma(vrm, /* drop_lock= */true);
		if (res)
			return res;

		return vrm->addr;
	case MREMAP_EXPAND:
		return expand_vma(vrm);
	}

	BUG();
}

static unsigned long do_mremap(struct vma_remap_struct *vrm)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret;

	ret = check_mremap_params(vrm);
	if (ret)
		return ret;

	vrm->old_len = PAGE_ALIGN(vrm->old_len);
	vrm->new_len = PAGE_ALIGN(vrm->new_len);
	vrm_set_delta(vrm);

	if (mmap_write_lock_killable(mm))
		return -EINTR;
	vrm->mmap_locked = true;

	vma = vrm->vma = vma_lookup(mm, vrm->addr);
	if (!vma) {
		ret = -EFAULT;
		goto out;
	}

	/* If mseal()'d, mremap() is prohibited. */
	if (!can_modify_vma(vma)) {
		ret = -EPERM;
		goto out;
	}

	/* Align to hugetlb page size, if required. */
	if (is_vm_hugetlb_page(vma) && !align_hugetlb(vrm)) {
		ret = -EINVAL;
		goto out;
	}

	vrm->remap_type = vrm_remap_type(vrm);

	/* Actually execute mremap. */
	ret = vrm_implies_new_addr(vrm) ? mremap_to(vrm) : mremap_at(vrm);

out:
	if (vrm->mmap_locked) {
		mmap_write_unlock(mm);
		vrm->mmap_locked = false;

		if (!offset_in_page(ret) && vrm->mlocked && vrm->new_len > vrm->old_len)
			mm_populate(vrm->new_addr + vrm->old_len, vrm->delta);
	}

	userfaultfd_unmap_complete(mm, vrm->uf_unmap_early);
	mremap_userfaultfd_complete(vrm->uf, vrm->addr, ret, vrm->old_len);
	userfaultfd_unmap_complete(mm, vrm->uf_unmap);

	return ret;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space).
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);
	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arch/arm64/tagged-address-abi.rst for more
	 * information.
	 */
	struct vma_remap_struct vrm = {
		.addr = untagged_addr(addr),
		.old_len = old_len,
		.new_len = new_len,
		.flags = flags,
		.new_addr = new_addr,

		.uf = &uf,
		.uf_unmap_early = &uf_unmap_early,
		.uf_unmap = &uf_unmap,

		.remap_type = MREMAP_INVALID, /* We set later. */
	};

	return do_mremap(&vrm);
}
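
/*
 * Example userspace usage (illustrative only, not part of the kernel build;
 * some_addr is a placeholder for a suitably aligned, unmapped address):
 *
 *	void *old = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	// Grow the mapping to two pages, allowing the kernel to move it.
 *	void *grown = mremap(old, 4096, 8192, MREMAP_MAYMOVE);
 *
 *	// Or move it to a caller-chosen address (MREMAP_FIXED requires
 *	// MREMAP_MAYMOVE).
 *	void *moved = mremap(old, 4096, 4096,
 *			     MREMAP_MAYMOVE | MREMAP_FIXED, some_addr);
 *
 * Both calls return the new mapping address on success and MAP_FAILED on
 * error, with errno set as per the checks in do_mremap() above.
 */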