// SPDX-License-Identifier: GPL-2.0
/*
 * mm/mremap.c
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
 * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/leafops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>
#include <linux/pgalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlb.h>

#include "internal.h"

/* Classify the kind of remap operation being performed. */
enum mremap_type {
	MREMAP_INVALID,		/* Initial state. */
	MREMAP_NO_RESIZE,	/* old_len == new_len, if not moved, do nothing. */
	MREMAP_SHRINK,		/* old_len > new_len. */
	MREMAP_EXPAND,		/* old_len < new_len. */
};

/*
 * Describes a VMA mremap() operation and is threaded throughout it.
 *
 * Any of the fields may be mutated by the operation, however these values will
 * always accurately reflect the remap (for instance, we may adjust lengths and
 * delta to account for hugetlb alignment).
 */
struct vma_remap_struct {
	/* User-provided state. */
	unsigned long addr;		/* User-specified address from which we remap. */
	unsigned long old_len;		/* Length of range being remapped. */
	unsigned long new_len;		/* Desired new length of mapping. */
	const unsigned long flags;	/* User-specified MREMAP_* flags. */
	unsigned long new_addr;		/* Optionally, desired new address. */

	/* uffd state. */
	struct vm_userfaultfd_ctx *uf;
	struct list_head *uf_unmap_early;
	struct list_head *uf_unmap;

	/* VMA state, determined in do_mremap(). */
	struct vm_area_struct *vma;

	/* Internal state, determined in do_mremap(). */
	unsigned long delta;		/* Absolute delta of old_len, new_len. */
	bool populate_expand;		/* mlock()'d expanded, must populate. */
	enum mremap_type remap_type;	/* expand, shrink, etc. */
	bool mmap_locked;		/* Is mm currently write-locked? */
	unsigned long charged;		/* If VM_ACCOUNT, # pages to account. */
	bool vmi_needs_invalidate;	/* Is the VMA iterator invalidated? */
};

static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	return pud;
}

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = get_old_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

static pud_t *alloc_new_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;

	return pud_alloc(mm, p4d, addr);
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = alloc_new_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	if (pte_none(pte))
		return pte;

	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
	if (pgtable_supports_soft_dirty()) {
		if (pte_present(pte))
			pte = pte_mksoft_dirty(pte);
		else
			pte = pte_swp_mksoft_dirty(pte);
	}

	return pte;
}

static int mremap_folio_pte_batch(struct vm_area_struct *vma, unsigned long addr,
		pte_t *ptep, pte_t pte, int max_nr)
{
	struct folio *folio;

	if (max_nr == 1)
		return 1;

	/* Avoid expensive folio lookup if we stand no chance of benefit. */
	if (pte_batch_hint(ptep, pte) == 1)
		return 1;

	folio = vm_normal_folio(vma, addr, pte);
	if (!folio || !folio_test_large(folio))
		return 1;

	return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr,
				     FPB_RESPECT_WRITE);
}
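
/*
 * For illustration: if a 64KiB large folio is mapped by 16 contiguous,
 * uniformly-mapped PTEs starting at @ptep, and @max_nr is at least 16,
 * mremap_folio_pte_batch() would return 16, letting move_ptes() below
 * clear and re-set all 16 entries in one batch rather than one at a
 * time. (Illustrative numbers only; the actual batch length depends on
 * the folio size and on how much of it is mapped within this range.)
 */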

static int move_ptes(struct pagetable_move_control *pmc,
		unsigned long extent, pmd_t *old_pmd, pmd_t *new_pmd)
{
	struct vm_area_struct *vma = pmc->old;
	bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_ptep, *new_ptep;
	pte_t old_pte, pte;
	pmd_t dummy_pmdval;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long old_addr = pmc->old_addr;
	unsigned long new_addr = pmc->new_addr;
	unsigned long old_end = old_addr + extent;
	unsigned long len = old_end - old_addr;
	int max_nr_ptes;
	int nr_ptes;
	int err = 0;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (pmc->need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptep = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	if (!old_ptep) {
		err = -EAGAIN;
		goto out;
	}
	/*
	 * Now new_pte is none, so collapse_scan_file() path can not find
	 * this by traversing file->f_mapping, so there is no concurrency with
	 * retract_page_tables(). In addition, we already hold the exclusive
	 * mmap_lock, so this new_pte page is stable, so there is no need to get
	 * pmdval and do pmd_same() check.
	 */
	new_ptep = pte_offset_map_rw_nolock(mm, new_pmd, new_addr, &dummy_pmdval,
					    &new_ptl);
	if (!new_ptep) {
		pte_unmap_unlock(old_ptep, old_ptl);
		err = -EAGAIN;
		goto out;
	}
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	lazy_mmu_mode_enable();

	for (; old_addr < old_end; old_ptep += nr_ptes, old_addr += nr_ptes * PAGE_SIZE,
	     new_ptep += nr_ptes, new_addr += nr_ptes * PAGE_SIZE) {
		VM_WARN_ON_ONCE(!pte_none(*new_ptep));

		nr_ptes = 1;
		max_nr_ptes = (old_end - old_addr) >> PAGE_SHIFT;
		old_pte = ptep_get(old_ptep);
		if (pte_none(old_pte))
			continue;

		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with folio_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(old_pte)) {
			nr_ptes = mremap_folio_pte_batch(vma, old_addr, old_ptep,
							 old_pte, max_nr_ptes);
			force_flush = true;
		}
		pte = get_and_clear_ptes(mm, old_addr, old_ptep, nr_ptes);
		pte = move_pte(pte, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);

		if (need_clear_uffd_wp && pte_is_uffd_wp_marker(pte))
			pte_clear(mm, new_addr, new_ptep);
		else {
			if (need_clear_uffd_wp) {
				if (pte_present(pte))
					pte = pte_clear_uffd_wp(pte);
				else
					pte = pte_swp_clear_uffd_wp(pte);
			}
			set_ptes(mm, new_addr, new_ptep, pte, nr_ptes);
		}
	}

	lazy_mmu_mode_disable();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_ptep - 1);
	pte_unmap_unlock(old_ptep - 1, old_ptl);
out:
	if (pmc->need_rmap_locks)
		drop_rmap_locks(vma);
	return err;
}

#ifndef arch_supports_page_table_move
#define arch_supports_page_table_move arch_supports_page_table_move
static inline bool arch_supports_page_table_move(void)
{
	return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
		IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
}
#endif

static inline bool uffd_supports_page_table_move(struct pagetable_move_control *pmc)
{
	/*
	 * If we are moving a VMA that has uffd-wp registered but with
	 * remap events disabled (new VMA will not be registered with uffd), we
	 * need to ensure that the uffd-wp state is cleared from all pgtables.
	 * This means recursing into lower page tables in move_page_tables().
	 *
	 * We might get called with VMAs reversed when recovering from a
	 * failed page table move. In that case, the
	 * "old"-but-actually-"originally new" VMA during recovery will not have
	 * a uffd context. Recursing into lower page tables during the original
	 * move but not during the recovery move will cause trouble, because we
	 * run into already-existing page tables. So check both VMAs.
	 */
	return !vma_has_uffd_without_event_remap(pmc->old) &&
		!vma_has_uffd_without_event_remap(pmc->new);
}

#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct pagetable_move_control *pmc,
			pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct vm_area_struct *vma = pmc->old;
	struct mm_struct *mm = vma->vm_mm;
	bool res = false;
	pmd_t pmd;

	if (!arch_supports_page_table_move())
		return false;
	if (!uffd_supports_page_table_move(pmc))
		return false;
	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd. But if we first
	 * have a few 4kB-only pages that get moved down, and then
	 * hit the "now the rest is PMD-aligned, let's do everything
	 * one pmd at a time", we will still have the old (now empty
	 * of any 4kB pages, but still there) PMD in the page table
	 * tree.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't move
	 * this pmd".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	pmd = *old_pmd;

	/* Racing with collapse? */
	if (unlikely(!pmd_present(pmd) || pmd_leaf(pmd)))
		goto out_unlock;
	/* Clear the pmd */
	pmd_clear(old_pmd);
	res = true;

	VM_BUG_ON(!pmd_none(*new_pmd));

	pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
	flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PMD_SIZE);
out_unlock:
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return res;
}
#else
static inline bool move_normal_pmd(struct pagetable_move_control *pmc,
		pmd_t *old_pmd, pmd_t *new_pmd)
{
	return false;
}
#endif

#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
static bool move_normal_pud(struct pagetable_move_control *pmc,
		pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct vm_area_struct *vma = pmc->old;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	if (!arch_supports_page_table_move())
		return false;
	if (!uffd_supports_page_table_move(pmc))
		return false;
	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	pud_populate(mm, new_pud, pud_pgtable(pud));
	flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pud(struct pagetable_move_control *pmc,
		pud_t *old_pud, pud_t *new_pud)
{
	return false;
}
#endif

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static bool move_huge_pud(struct pagetable_move_control *pmc,
			pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct vm_area_struct *vma = pmc->old;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	/* Set the new pud */
	/* mark soft_dirty when we add pud level soft dirty support */
	set_pud_at(mm, pmc->new_addr, new_pud, pud);
	flush_pud_tlb_range(vma, pmc->old_addr, pmc->old_addr + HPAGE_PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static bool move_huge_pud(struct pagetable_move_control *pmc,
			pud_t *old_pud, pud_t *new_pud)
{
	WARN_ON_ONCE(1);
	return false;
}
#endif

enum pgt_entry {
	NORMAL_PMD,
	HPAGE_PMD,
	NORMAL_PUD,
	HPAGE_PUD,
};

/*
 * Returns an extent of the corresponding size for the pgt_entry specified if
 * valid. Else returns a smaller extent bounded by the end of the source and
 * destination pgt_entry.
 */
static __always_inline unsigned long get_extent(enum pgt_entry entry,
						struct pagetable_move_control *pmc)
{
	unsigned long next, extent, mask, size;
	unsigned long old_addr = pmc->old_addr;
	unsigned long old_end = pmc->old_end;
	unsigned long new_addr = pmc->new_addr;

	switch (entry) {
	case HPAGE_PMD:
	case NORMAL_PMD:
		mask = PMD_MASK;
		size = PMD_SIZE;
		break;
	case HPAGE_PUD:
	case NORMAL_PUD:
		mask = PUD_MASK;
		size = PUD_SIZE;
		break;
	default:
		BUILD_BUG();
		break;
	}

	next = (old_addr + size) & mask;
	/* even if next overflowed, extent below will be ok */
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}
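
/*
 * A worked example (illustrative values, assuming a 2MiB PMD_SIZE): with
 * old_addr = 0x3ff000, old_end = 0x800000 and new_addr = 0x7ff000,
 * get_extent(NORMAL_PMD, pmc) computes next = 0x400000, i.e. extent =
 * 0x1000 - just the single page up to the source's next PMD boundary,
 * which matches the destination side here since the two addresses are
 * mutually aligned. Once both addresses sit on PMD boundaries, subsequent
 * calls return full PMD_SIZE extents, with old_end capping the final one.
 */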

/*
 * Should move_pgt_entry() acquire the rmap locks? This is either expressed in
 * the PMC, or overridden in the case of normal, larger page tables.
 */
static bool should_take_rmap_locks(struct pagetable_move_control *pmc,
				   enum pgt_entry entry)
{
	switch (entry) {
	case NORMAL_PMD:
	case NORMAL_PUD:
		return true;
	default:
		return pmc->need_rmap_locks;
	}
}

/*
 * Attempts to speedup the move by moving entry at the level corresponding to
 * pgt_entry. Returns true if the move was successful, else false.
 */
static bool move_pgt_entry(struct pagetable_move_control *pmc,
			   enum pgt_entry entry, void *old_entry, void *new_entry)
{
	bool moved = false;
	bool need_rmap_locks = should_take_rmap_locks(pmc, entry);

	/* See comment in move_ptes() */
	if (need_rmap_locks)
		take_rmap_locks(pmc->old);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(pmc, old_entry, new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(pmc, old_entry, new_entry);
		break;
	case HPAGE_PMD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pmd(pmc->old, pmc->old_addr, pmc->new_addr, old_entry,
				      new_entry);
		break;
	case HPAGE_PUD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pud(pmc, old_entry, new_entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(pmc->old);

	return moved;
}

/*
 * A helper to check if aligning down is OK. The aligned address should fall
 * on *no mapping*. For the stack moving down, that's a special move within
 * the VMA that is created to span the source and destination of the move,
 * so we make an exception for it.
 */
static bool can_align_down(struct pagetable_move_control *pmc,
			   struct vm_area_struct *vma, unsigned long addr_to_align,
			   unsigned long mask)
{
	unsigned long addr_masked = addr_to_align & mask;

	/*
	 * If @addr_to_align of either source or destination is not the beginning
	 * of the corresponding VMA, we can't align down or we will destroy part
	 * of the current mapping.
	 */
	if (!pmc->for_stack && vma->vm_start != addr_to_align)
		return false;

	/* In the stack case we explicitly permit in-VMA alignment. */
	if (pmc->for_stack && addr_masked >= vma->vm_start)
		return true;

	/*
	 * Make sure the realignment doesn't cause the address to fall on an
	 * existing mapping.
	 */
	return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL;
}

/*
 * Determine if we are in fact able to realign for efficiency to a higher page
 * table boundary.
 */
static bool can_realign_addr(struct pagetable_move_control *pmc,
			     unsigned long pagetable_mask)
{
	unsigned long align_mask = ~pagetable_mask;
	unsigned long old_align = pmc->old_addr & align_mask;
	unsigned long new_align = pmc->new_addr & align_mask;
	unsigned long pagetable_size = align_mask + 1;
	unsigned long old_align_next = pagetable_size - old_align;

	/*
	 * We don't want to have to go hunting for VMAs from the end of the old
	 * VMA to the next page table boundary, also we want to make sure the
	 * operation is worthwhile.
	 *
	 * So ensure that we only perform this realignment if the end of the
	 * range being copied reaches or crosses the page table boundary.
	 *
	 *  boundary                       boundary
	 *  .<- old_align ->               .
	 *  .               |----------------.-----------|
	 *  .               |          vma   .           |
	 *  .               |----------------.-----------|
	 *  .               <----------------.----------->
	 *  .                      len_in    .
	 *  <------------------------------->
	 *  .          pagetable_size       .
	 *  .               <--------------->
	 *  .                old_align_next .
	 */
	if (pmc->len_in < old_align_next)
		return false;

	/* Skip if the addresses are already aligned. */
	if (old_align == 0)
		return false;

	/* Only realign if the new and old addresses are mutually aligned. */
	if (old_align != new_align)
		return false;

	/* Ensure realignment doesn't cause overlap with existing mappings. */
	if (!can_align_down(pmc, pmc->old, pmc->old_addr, pagetable_mask) ||
	    !can_align_down(pmc, pmc->new, pmc->new_addr, pagetable_mask))
		return false;

	return true;
}

/*
 * Opportunistically realign to specified boundary for faster copy.
 *
 * Consider an mremap() of a VMA with page table boundaries as below, and no
 * preceding VMAs from the lower page table boundary to the start of the VMA,
 * with the end of the range reaching or crossing the page table boundary.
 *
 *   boundary                    boundary
 *   .          |----------------.-----------|
 *   .          |          vma   .           |
 *   .          |----------------.-----------|
 *   .      pmc->old_addr        .       pmc->old_end
 *   .          <---------------------------->
 *   .              move these page tables
 *
 * If we proceed with moving page tables in this scenario, we will have a lot of
 * work to do traversing old page tables and establishing new ones in the
 * destination across multiple lower level page tables.
 *
 * The idea here is simply to align pmc->old_addr, pmc->new_addr down to the
 * page table boundary, so we can simply copy a single page table entry for the
 * aligned portion of the VMA instead:
 *
 *   boundary                    boundary
 *   .          |----------------.-----------|
 *   .          |          vma   .           |
 *   .          |----------------.-----------|
 * pmc->old_addr                 .       pmc->old_end
 *   <---------------------------------------->
 *   .              move these page tables
 */
static void try_realign_addr(struct pagetable_move_control *pmc,
			     unsigned long pagetable_mask)
{
	if (!can_realign_addr(pmc, pagetable_mask))
		return;

	/*
	 * Simply align to page table boundaries. Note that we do NOT update the
	 * pmc->old_end value, and since the move_page_tables() operation spans
	 * from [old_addr, old_end) (offsetting new_addr as it is performed),
	 * this simply changes the start of the copy, not the end.
	 */
	pmc->old_addr &= pagetable_mask;
	pmc->new_addr &= pagetable_mask;
}

/* Is the page table move operation done? */
static bool pmc_done(struct pagetable_move_control *pmc)
{
	return pmc->old_addr >= pmc->old_end;
}

/* Advance to the next page table, offset by extent bytes. */
static void pmc_next(struct pagetable_move_control *pmc, unsigned long extent)
{
	pmc->old_addr += extent;
	pmc->new_addr += extent;
}

/*
 * Determine how many bytes in the specified input range have had their page
 * tables moved so far.
 */
static unsigned long pmc_progress(struct pagetable_move_control *pmc)
{
	unsigned long orig_old_addr = pmc->old_end - pmc->len_in;
	unsigned long old_addr = pmc->old_addr;

	/*
	 * Prevent negative return values when {old,new}_addr was realigned but
	 * we broke out of the loop in move_page_tables() for the first PMD
	 * itself.
	 */
	return old_addr < orig_old_addr ? 0 : old_addr - orig_old_addr;
}
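
/*
 * For illustration: with len_in = 0x400000 and old_end = 0x800000,
 * orig_old_addr is 0x400000. If the copy loop stops with old_addr at
 * 0x600000, pmc_progress() reports 0x200000 bytes moved. Had
 * try_realign_addr() pulled old_addr below 0x400000 and the loop failed
 * on the very first PMD, the result is clamped to 0 rather than wrapping
 * around. (Illustrative values only.)
 */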

unsigned long move_page_tables(struct pagetable_move_control *pmc)
{
	unsigned long extent;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;
	pud_t *old_pud, *new_pud;
	struct mm_struct *mm = pmc->old->vm_mm;

	if (!pmc->len_in)
		return 0;

	if (is_vm_hugetlb_page(pmc->old))
		return move_hugetlb_page_tables(pmc->old, pmc->new, pmc->old_addr,
						pmc->new_addr, pmc->len_in);

	/*
	 * If possible, realign addresses to PMD boundary for faster copy.
	 * Only realign if the mremap copying hits a PMD boundary.
	 */
	try_realign_addr(pmc, PMD_MASK);

	flush_cache_range(pmc->old, pmc->old_addr, pmc->old_end);
	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, mm,
				pmc->old_addr, pmc->old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; !pmc_done(pmc); pmc_next(pmc, extent)) {
		cond_resched();
		/*
		 * If extent is PUD-sized try to speed up the move by moving at the
		 * PUD level if possible.
		 */
		extent = get_extent(NORMAL_PUD, pmc);

		old_pud = get_old_pud(mm, pmc->old_addr);
		if (!old_pud)
			continue;
		new_pud = alloc_new_pud(mm, pmc->new_addr);
		if (!new_pud)
			break;
		if (pud_trans_huge(*old_pud)) {
			if (extent == HPAGE_PUD_SIZE) {
				move_pgt_entry(pmc, HPAGE_PUD, old_pud, new_pud);
				/* We ignore and continue on error? */
				continue;
			}
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
			if (move_pgt_entry(pmc, NORMAL_PUD, old_pud, new_pud))
				continue;
		}

		extent = get_extent(NORMAL_PMD, pmc);
		old_pmd = get_old_pmd(mm, pmc->old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(mm, pmc->new_addr);
		if (!new_pmd)
			break;
again:
		if (pmd_is_huge(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE &&
			    move_pgt_entry(pmc, HPAGE_PMD, old_pmd, new_pmd))
				continue;
			split_huge_pmd(pmc->old, old_pmd, pmc->old_addr);
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
			   extent == PMD_SIZE) {
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			if (move_pgt_entry(pmc, NORMAL_PMD, old_pmd, new_pmd))
				continue;
		}
		if (pmd_none(*old_pmd))
			continue;
		if (pte_alloc(pmc->new->vm_mm, new_pmd))
			break;
		if (move_ptes(pmc, extent, old_pmd, new_pmd) < 0)
			goto again;
	}

	mmu_notifier_invalidate_range_end(&range);

	return pmc_progress(pmc);
}

/* Set vrm->delta to the difference in VMA size specified by user. */
static void vrm_set_delta(struct vma_remap_struct *vrm)
{
	vrm->delta = abs_diff(vrm->old_len, vrm->new_len);
}

/* Determine what kind of remap this is - shrink, expand or no resize at all. */
static enum mremap_type vrm_remap_type(struct vma_remap_struct *vrm)
{
	if (vrm->delta == 0)
		return MREMAP_NO_RESIZE;

	if (vrm->old_len > vrm->new_len)
		return MREMAP_SHRINK;

	return MREMAP_EXPAND;
}

/*
 * When moving a VMA to vrm->new_addr, does this result in the new and old VMAs
 * overlapping?
 */
static bool vrm_overlaps(struct vma_remap_struct *vrm)
{
	unsigned long start_old = vrm->addr;
	unsigned long start_new = vrm->new_addr;
	unsigned long end_old = vrm->addr + vrm->old_len;
	unsigned long end_new = vrm->new_addr + vrm->new_len;

	/*
	 * start_old    end_old
	 *     |-----------|
	 *     |           |
	 *     |-----------|
	 *             |-------------|
	 *             |             |
	 *             |-------------|
	 *         start_new      end_new
	 */
	if (end_old > start_new && end_new > start_old)
		return true;

	return false;
}
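
/*
 * For example (illustrative values): moving [0x10000, 0x13000) to
 * new_addr = 0x12000 with new_len = 0x3000 gives end_old = 0x13000 >
 * start_new = 0x12000 and end_new = 0x15000 > start_old = 0x10000, so
 * the ranges overlap and vrm_overlaps() returns true. Raising new_addr
 * to 0x13000 makes end_old > start_new false: half-open intervals that
 * merely touch do not overlap.
 */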

/*
 * Will a new address definitely be assigned? This is the case either if the
 * user specifies it via MREMAP_FIXED, or if MREMAP_DONTUNMAP is used,
 * indicating we will always determine a target address.
 */
static bool vrm_implies_new_addr(struct vma_remap_struct *vrm)
{
	return vrm->flags & (MREMAP_FIXED | MREMAP_DONTUNMAP);
}

/*
 * Find an unmapped area for the requested vrm->new_addr.
 *
 * If MREMAP_FIXED then this is equivalent to a MAP_FIXED mmap() call. If only
 * MREMAP_DONTUNMAP is set, then this is equivalent to providing a hint to
 * mmap(), otherwise this is equivalent to mmap() specifying a NULL address.
 *
 * Returns 0 on success (with vrm->new_addr updated), or an error code upon
 * failure.
 */
static unsigned long vrm_set_new_addr(struct vma_remap_struct *vrm)
{
	struct vm_area_struct *vma = vrm->vma;
	unsigned long map_flags = 0;
	/* Page Offset _into_ the VMA. */
	pgoff_t internal_pgoff = (vrm->addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff_t pgoff = vma->vm_pgoff + internal_pgoff;
	unsigned long new_addr = vrm_implies_new_addr(vrm) ? vrm->new_addr : 0;
	unsigned long res;

	if (vrm->flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	res = get_unmapped_area(vma->vm_file, new_addr, vrm->new_len, pgoff,
				map_flags);
	if (IS_ERR_VALUE(res))
		return res;

	vrm->new_addr = res;
	return 0;
}

/*
 * Keep track of pages which have been added to the memory mapping. If the VMA
 * is accounted, also check to see if there is sufficient memory.
 *
 * Returns true on success, false if insufficient memory to charge.
 */
static bool vrm_calc_charge(struct vma_remap_struct *vrm)
{
	unsigned long charged;

	if (!(vrm->vma->vm_flags & VM_ACCOUNT))
		return true;

	/*
	 * If we don't unmap the old mapping, then we account the entirety of
	 * the length of the new one. Otherwise it's just the delta in size.
	 */
	if (vrm->flags & MREMAP_DONTUNMAP)
		charged = vrm->new_len >> PAGE_SHIFT;
	else
		charged = vrm->delta >> PAGE_SHIFT;

	/* This accounts 'charged' pages of memory. */
	if (security_vm_enough_memory_mm(current->mm, charged))
		return false;

	vrm->charged = charged;
	return true;
}

/*
 * An error has occurred so we will not be using vrm->charged memory. Unaccount
 * this memory if the VMA is accounted.
 */
static void vrm_uncharge(struct vma_remap_struct *vrm)
{
	if (!(vrm->vma->vm_flags & VM_ACCOUNT))
		return;

	vm_unacct_memory(vrm->charged);
	vrm->charged = 0;
}
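
/*
 * For illustration: expanding an accounted 16-page VMA to 24 pages
 * charges delta = 8 pages, while an MREMAP_DONTUNMAP move of the same
 * VMA charges the full new_len of 24 pages, since the old mapping stays
 * in place and remains accounted separately. (Illustrative numbers
 * only.)
 */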

/*
 * Update mm exec_vm, stack_vm, data_vm, and locked_vm fields as needed to
 * account for 'bytes' memory used, and if locked, indicate this in the VRM so
 * we can handle this correctly later.
 */
static void vrm_stat_account(struct vma_remap_struct *vrm,
			     unsigned long bytes)
{
	unsigned long pages = bytes >> PAGE_SHIFT;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = vrm->vma;

	vm_stat_account(mm, vma->vm_flags, pages);
	if (vma->vm_flags & VM_LOCKED)
		mm->locked_vm += pages;
}

static bool __check_map_count_against_split(struct mm_struct *mm,
					    bool before_unmaps)
{
	const int sys_map_count = get_sysctl_max_map_count();
	int map_count = mm->map_count;

	mmap_assert_write_locked(mm);

	/*
	 * At the point of shrinking the VMA, if new_len < old_len, we unmap
	 * thusly in the worst case:
	 *
	 *              old_addr+old_len                       old_addr+old_len
	 *                      |                                       |
	 * |---------------.----.---------|    |---------------|    |---------|
	 * |               .    .         | -> |      +1       | -1 |   +1    |
	 * |---------------.----.---------|    |---------------|    |---------|
	 *                 |                                    |
	 *         old_addr+new_len                     old_addr+new_len
	 *
	 * At the point of removing the portion of an existing VMA to make space
	 * for the moved VMA if MREMAP_FIXED, we unmap thusly in the worst case:
	 *
	 * new_addr   new_addr+new_len         new_addr   new_addr+new_len
	 * |----.---------------.---------|    |----|               |---------|
	 * |    .               .         | -> | +1 |       -1      |   +1    |
	 * |----.---------------.---------|    |----|               |---------|
	 *
	 * Therefore, before we consider the move anything, we have to account
	 * for 2 additional VMAs possibly being created upon these unmappings.
	 */
	if (before_unmaps)
		map_count += 2;

	/*
	 * At the point of MOVING the VMA:
	 *
	 * We start by copying a VMA, which creates an additional VMA if no
	 * merge occurs, then if not MREMAP_DONTUNMAP, we unmap the source VMA.
	 * In the worst case we might then observe:
	 *
	 * new_addr        new_addr+new_len    new_addr        new_addr+new_len
	 * |----|          |---------|         |----|---------------|---------|
	 * |    |          |         |    ->   |    |      +1       |         |
	 * |----|          |---------|         |----|---------------|---------|
	 *
	 * old_addr       old_addr+old_len     old_addr       old_addr+old_len
	 * |----.---------------.---------|    |----|               |---------|
	 * |    .               .         | -> | +1 |       -1      |   +1    |
	 * |----.---------------.---------|    |----|               |---------|
	 *
	 * Therefore we must check to ensure we have headroom of 2 additional
	 * VMAs.
	 */
	return map_count + 2 <= sys_map_count;
}

/* Do we violate the map count limit if we split VMAs when moving the VMA? */
static bool check_map_count_against_split(void)
{
	return __check_map_count_against_split(current->mm,
					       /*before_unmaps=*/false);
}

/* Do we violate the map count limit if we split VMAs prior to early unmaps? */
static bool check_map_count_against_split_early(void)
{
	return __check_map_count_against_split(current->mm,
					       /*before_unmaps=*/true);
}

/*
 * Perform checks before attempting to write a VMA prior to it being
 * moved.
 */
static unsigned long prep_move_vma(struct vma_remap_struct *vrm)
{
	unsigned long err = 0;
	struct vm_area_struct *vma = vrm->vma;
	unsigned long old_addr = vrm->addr;
	unsigned long old_len = vrm->old_len;
	vm_flags_t dummy = vma->vm_flags;

	/*
	 * We'd prefer to avoid failure later on in do_munmap: we copy a VMA,
	 * which may not merge, then (if MREMAP_DONTUNMAP is not set) unmap the
	 * source, which may split, causing a net increase of 2 mappings.
	 */
	if (!check_map_count_against_split())
		return -ENOMEM;

	if (vma->vm_ops && vma->vm_ops->may_split) {
		if (vma->vm_start != old_addr)
			err = vma->vm_ops->may_split(vma, old_addr);
		if (!err && vma->vm_end != old_addr + old_len)
			err = vma->vm_ops->may_split(vma, old_addr + old_len);
		if (err)
			return err;
	}

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped. But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
			  MADV_UNMERGEABLE, &dummy);
	if (err)
		return err;

	return 0;
}

/*
 * Unmap source VMA for VMA move, turning it from a copy to a move, being
 * careful to ensure we do not underflow memory account while doing so if an
 * accountable move.
 *
 * This is best effort, if we fail to unmap then we simply try to correct
 * accounting and exit.
 */
static void unmap_source_vma(struct vma_remap_struct *vrm)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = vrm->addr;
	unsigned long len = vrm->old_len;
	struct vm_area_struct *vma = vrm->vma;
	VMA_ITERATOR(vmi, mm, addr);
	int err;
	unsigned long vm_start;
	unsigned long vm_end;
	/*
	 * It might seem odd that we check for MREMAP_DONTUNMAP here, given this
	 * function implies that we unmap the original VMA, which seems
	 * contradictory.
	 *
	 * However, this occurs when this operation was attempted and an error
	 * arose, in which case we _do_ wish to unmap the _new_ VMA, which means
	 * we actually _do_ want it to be unaccounted.
	 */
	bool accountable_move = (vma->vm_flags & VM_ACCOUNT) &&
		!(vrm->flags & MREMAP_DONTUNMAP);

	/*
	 * So we perform a trick here to prevent incorrect accounting. Any merge
	 * or new VMA allocation performed in copy_vma() does not adjust
	 * accounting, it is expected that callers handle this.
	 *
	 * And indeed we have already done so, accounting appropriately in
	 * vrm_calc_charge().
	 *
	 * However, when we unmap the existing VMA (to effect the move), this
	 * code will, if the VMA has VM_ACCOUNT set, attempt to unaccount
	 * removed pages.
	 *
	 * To avoid this we temporarily clear this flag, reinstating on any
	 * portions of the original VMA that remain.
	 */
	if (accountable_move) {
		vm_flags_clear(vma, VM_ACCOUNT);
		/* We are about to split vma, so store the start/end. */
		vm_start = vma->vm_start;
		vm_end = vma->vm_end;
	}

	err = do_vmi_munmap(&vmi, mm, addr, len, vrm->uf_unmap, /* unlock= */false);
	vrm->vma = NULL; /* Invalidated. */
	vrm->vmi_needs_invalidate = true;
	if (err) {
		/* OOM: unable to split vma, just get accounts right */
		vm_acct_memory(len >> PAGE_SHIFT);
		return;
	}

	/*
	 * If we mremap() from a VMA like this:
	 *
	 *        addr      end
	 *         |         |
	 *         v         v
	 * |---------------------------|
	 * |                           |
	 * |---------------------------|
	 *
	 * Having cleared VM_ACCOUNT from the whole VMA, after we unmap above
	 * we'll end up with:
	 *
	 *        addr      end
	 *         |         |
	 *         v         v
	 * |-------|         |---------|
	 * |   A   |         |    B    |
	 * |-------|         |---------|
	 *
	 * The VMI is still pointing at addr, so vma_prev() will give us A, and
	 * a subsequent or lone vma_next() will give us B.
	 *
	 * do_vmi_munmap() will have restored the VMI back to addr.
	 */
	if (accountable_move) {
		unsigned long end = addr + len;

		if (vm_start < addr) {
			struct vm_area_struct *prev = vma_prev(&vmi);

			vm_flags_set(prev, VM_ACCOUNT); /* Acquires VMA lock. */
		}

		if (vm_end > end) {
			struct vm_area_struct *next = vma_next(&vmi);

			vm_flags_set(next, VM_ACCOUNT); /* Acquires VMA lock. */
		}
	}
}

/*
 * Copy vrm->vma over to vrm->new_addr possibly adjusting size as part of the
 * process. Additionally handle an error occurring on moving of page tables,
 * where we reset vrm state to cause unmapping of the new VMA.
 *
 * Outputs the newly installed VMA to new_vma_ptr. Returns 0 on success or an
 * error code.
 */
static int copy_vma_and_data(struct vma_remap_struct *vrm,
			     struct vm_area_struct **new_vma_ptr)
{
	unsigned long internal_offset = vrm->addr - vrm->vma->vm_start;
	unsigned long internal_pgoff = internal_offset >> PAGE_SHIFT;
	unsigned long new_pgoff = vrm->vma->vm_pgoff + internal_pgoff;
	unsigned long moved_len;
	struct vm_area_struct *vma = vrm->vma;
	struct vm_area_struct *new_vma;
	int err = 0;
	PAGETABLE_MOVE(pmc, NULL, NULL, vrm->addr, vrm->new_addr, vrm->old_len);

	new_vma = copy_vma(&vma, vrm->new_addr, vrm->new_len, new_pgoff,
			   &pmc.need_rmap_locks);
	if (!new_vma) {
		vrm_uncharge(vrm);
		*new_vma_ptr = NULL;
		return -ENOMEM;
	}
	/* By merging, we may have invalidated any iterator in use. */
	if (vma != vrm->vma)
		vrm->vmi_needs_invalidate = true;

	vrm->vma = vma;
	pmc.old = vma;
	pmc.new = new_vma;

	moved_len = move_page_tables(&pmc);
	if (moved_len < vrm->old_len)
		err = -ENOMEM;
	else if (vma->vm_ops && vma->vm_ops->mremap)
		err = vma->vm_ops->mremap(new_vma);

	if (unlikely(err)) {
		PAGETABLE_MOVE(pmc_revert, new_vma, vma, vrm->new_addr,
			       vrm->addr, moved_len);

		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		pmc_revert.need_rmap_locks = true;
		move_page_tables(&pmc_revert);

		vrm->vma = new_vma;
		vrm->old_len = vrm->new_len;
		vrm->addr = vrm->new_addr;
	} else {
		mremap_userfaultfd_prep(new_vma, vrm->uf);
	}

	fixup_hugetlb_reservations(vma);

	*new_vma_ptr = new_vma;
	return err;
}

/*
 * Perform final tasks for MREMAP_DONTUNMAP operation, clearing mlock() flag on
 * remaining VMA by convention (it cannot be mlock()'d any longer, as pages in
 * range are no longer mapped), and removing anon_vma_chain links from it if the
 * entire VMA was copied over.
 */
static void dontunmap_complete(struct vma_remap_struct *vrm,
			       struct vm_area_struct *new_vma)
{
	unsigned long start = vrm->addr;
	unsigned long end = vrm->addr + vrm->old_len;
	unsigned long old_start = vrm->vma->vm_start;
	unsigned long old_end = vrm->vma->vm_end;

	/* We always clear VM_LOCKED[ONFAULT] on the old VMA. */
	vm_flags_clear(vrm->vma, VM_LOCKED_MASK);

	/*
	 * anon_vma links of the old vma are no longer needed after its page
	 * table has been moved.
	 */
	if (new_vma != vrm->vma && start == old_start && end == old_end)
		unlink_anon_vmas(vrm->vma);

	/* Because we won't unmap we don't need to touch locked_vm. */
}

static unsigned long move_vma(struct vma_remap_struct *vrm)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *new_vma;
	unsigned long hiwater_vm;
	int err;

	err = prep_move_vma(vrm);
	if (err)
		return err;

	/*
	 * If accounted, determine the number of bytes the operation will
	 * charge.
	 */
	if (!vrm_calc_charge(vrm))
		return -ENOMEM;

	/* We don't want racing faults. */
	vma_start_write(vrm->vma);

	/* Perform copy step. */
	err = copy_vma_and_data(vrm, &new_vma);
	/*
	 * If we established the copied-to VMA, we attempt to recover from the
	 * error by setting the destination VMA to the source VMA and unmapping
	 * it below.
	 */
	if (err && !new_vma)
		return err;

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;

	vrm_stat_account(vrm, vrm->new_len);
	if (unlikely(!err && (vrm->flags & MREMAP_DONTUNMAP)))
		dontunmap_complete(vrm, new_vma);
	else
		unmap_source_vma(vrm);

	mm->hiwater_vm = hiwater_vm;

	return err ? (unsigned long)err : vrm->new_addr;
}

/*
 * The user has requested that the VMA be shrunk (i.e., old_len > new_len), so
 * execute this, optionally dropping the mmap lock when we do so.
 *
 * In both cases this invalidates the VMA, however if we don't drop the lock,
 * then load the correct VMA into vrm->vma afterwards.
 */
static unsigned long shrink_vma(struct vma_remap_struct *vrm,
				bool drop_lock)
{
	struct mm_struct *mm = current->mm;
	unsigned long unmap_start = vrm->addr + vrm->new_len;
	unsigned long unmap_bytes = vrm->delta;
	unsigned long res;
	VMA_ITERATOR(vmi, mm, unmap_start);

	VM_BUG_ON(vrm->remap_type != MREMAP_SHRINK);

	res = do_vmi_munmap(&vmi, mm, unmap_start, unmap_bytes,
			    vrm->uf_unmap, drop_lock);
	vrm->vma = NULL; /* Invalidated. */
	if (res)
		return res;

	/*
	 * If we've not dropped the lock, then we should reload the VMA to
	 * replace the invalidated VMA with the one that may have now been
	 * split.
	 */
	if (drop_lock) {
		vrm->mmap_locked = false;
	} else {
		vrm->vma = vma_lookup(mm, vrm->addr);
		if (!vrm->vma)
			return -EFAULT;
	}

	return 0;
}

/*
 * mremap_to() - remap a vma to a new location.
 * Returns: The new address of the vma or an error.
 */
static unsigned long mremap_to(struct vma_remap_struct *vrm)
{
	struct mm_struct *mm = current->mm;
	unsigned long err;

	if (vrm->flags & MREMAP_FIXED) {
		/*
		 * In mremap_to() the VMA is moved to the destination address,
		 * so we must unmap the destination first. do_munmap() will
		 * check whether the destination is sealed.
		 */
		err = do_munmap(mm, vrm->new_addr, vrm->new_len,
				vrm->uf_unmap_early);
		vrm->vma = NULL; /* Invalidated. */
		vrm->vmi_needs_invalidate = true;
		if (err)
			return err;

		/*
		 * If we remap a portion of a VMA elsewhere in the same VMA,
		 * this can invalidate the old VMA. Reset.
		 */
		vrm->vma = vma_lookup(mm, vrm->addr);
		if (!vrm->vma)
			return -EFAULT;
	}

	if (vrm->remap_type == MREMAP_SHRINK) {
		err = shrink_vma(vrm, /* drop_lock= */false);
		if (err)
			return err;

		/* Set up for the move now shrink has been executed. */
		vrm->old_len = vrm->new_len;
	}

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (vrm->flags & MREMAP_DONTUNMAP) {
		vm_flags_t vm_flags = vrm->vma->vm_flags;
		unsigned long pages = vrm->old_len >> PAGE_SHIFT;

		if (!may_expand_vm(mm, vm_flags, pages))
			return -ENOMEM;
	}

	err = vrm_set_new_addr(vrm);
	if (err)
		return err;

	return move_vma(vrm);
}

static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;

	if (end < vma->vm_end) /* overflow */
		return 0;
	if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/* Determine whether we are actually able to execute an in-place expansion. */
static bool vrm_can_expand_in_place(struct vma_remap_struct *vrm)
{
	/* Number of bytes from vrm->addr to end of VMA. */
	unsigned long suffix_bytes = vrm->vma->vm_end - vrm->addr;

	/* If end of range aligns to end of VMA, we can just expand in-place. */
	if (suffix_bytes != vrm->old_len)
		return false;

	/* Check whether this is feasible. */
	if (!vma_expandable(vrm->vma, vrm->delta))
		return false;

	return true;
}

/*
 * We know we can expand the VMA in-place by delta pages, so do so.
 *
 * If we discover the VMA is locked, update mm_struct statistics accordingly
 * and indicate so to the caller.
 */
static unsigned long expand_vma_in_place(struct vma_remap_struct *vrm)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = vrm->vma;
	VMA_ITERATOR(vmi, mm, vma->vm_end);

	if (!vrm_calc_charge(vrm))
		return -ENOMEM;

	/*
	 * Function vma_merge_extend() is called on the extension we are adding
	 * to the already existing vma: it will merge this extension with the
	 * already existing vma (the expand operation itself), and possibly also
	 * with the next vma if it becomes adjacent to the expanded vma and is
	 * otherwise compatible.
	 */
	vma = vma_merge_extend(&vmi, vma, vrm->delta);
	if (!vma) {
		vrm_uncharge(vrm);
		return -ENOMEM;
	}
	vrm->vma = vma;

	vrm_stat_account(vrm, vrm->delta);

	return 0;
}

static bool align_hugetlb(struct vma_remap_struct *vrm)
{
	struct hstate *h __maybe_unused = hstate_vma(vrm->vma);

	vrm->old_len = ALIGN(vrm->old_len, huge_page_size(h));
	vrm->new_len = ALIGN(vrm->new_len, huge_page_size(h));

	/* addrs must be huge page aligned */
	if (vrm->addr & ~huge_page_mask(h))
		return false;
	if (vrm->new_addr & ~huge_page_mask(h))
		return false;

	/*
	 * Don't allow remap expansion, because the underlying hugetlb
	 * reservation is not yet capable of handling split reservations.
	 */
	if (vrm->new_len > vrm->old_len)
		return false;

	return true;
}

/*
 * We are mremap()'ing without specifying a fixed address to move to, but are
 * requesting that the VMA's size be increased.
 *
 * Try to do so in-place, if this fails, then move the VMA to a new location to
 * action the change.
 */
static unsigned long expand_vma(struct vma_remap_struct *vrm)
{
	unsigned long err;

	/*
	 * [addr, old_len) spans precisely to the end of the VMA, so try to
	 * expand it in-place.
	 */
	if (vrm_can_expand_in_place(vrm)) {
		err = expand_vma_in_place(vrm);
		if (err)
			return err;

		/* OK we're done! */
		return vrm->addr;
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it.
	 */

	/* We're not allowed to move the VMA, so error out. */
	if (!(vrm->flags & MREMAP_MAYMOVE))
		return -ENOMEM;

	/* Find a new location to move the VMA to. */
	err = vrm_set_new_addr(vrm);
	if (err)
		return err;

	return move_vma(vrm);
}

/*
 * Attempt to resize the VMA in-place; if we cannot, then move the VMA to the
 * first available address to perform the operation.
 */
static unsigned long mremap_at(struct vma_remap_struct *vrm)
{
	unsigned long res;

	switch (vrm->remap_type) {
	case MREMAP_INVALID:
		break;
	case MREMAP_NO_RESIZE:
		/* NO-OP CASE - resizing to the same size. */
		return vrm->addr;
	case MREMAP_SHRINK:
		/*
		 * SHRINK CASE. Can always be done in-place.
		 *
		 * Simply unmap the shrunken portion of the VMA. This does all
		 * the needed commit accounting, and we indicate that the mmap
		 * lock should be dropped.
		 */
		res = shrink_vma(vrm, /* drop_lock= */true);
		if (res)
			return res;

		return vrm->addr;
	case MREMAP_EXPAND:
		return expand_vma(vrm);
	}

	/* Should not be possible. */
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/*
 * Will this operation result in the VMA being expanded or moved and thus need
 * to map a new portion of virtual address space?
 */
static bool vrm_will_map_new(struct vma_remap_struct *vrm)
{
	if (vrm->remap_type == MREMAP_EXPAND)
		return true;

	if (vrm_implies_new_addr(vrm))
		return true;

	return false;
}

/* Does this remap ONLY move mappings? */
static bool vrm_move_only(struct vma_remap_struct *vrm)
{
	if (!(vrm->flags & MREMAP_FIXED))
		return false;

	if (vrm->old_len != vrm->new_len)
		return false;

	return true;
}

static void notify_uffd(struct vma_remap_struct *vrm, bool failed)
{
	struct mm_struct *mm = current->mm;

	/* Regardless of success/failure, we always notify of any unmaps. */
	userfaultfd_unmap_complete(mm, vrm->uf_unmap_early);
	if (failed)
		mremap_userfaultfd_fail(vrm->uf);
	else
		mremap_userfaultfd_complete(vrm->uf, vrm->addr,
					    vrm->new_addr, vrm->old_len);
	userfaultfd_unmap_complete(mm, vrm->uf_unmap);
}

static bool vma_multi_allowed(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	/*
	 * We can't support moving multiple uffd VMAs as notify requires
	 * mmap lock to be dropped.
	 */
	if (userfaultfd_armed(vma))
		return false;

	/*
	 * Custom get unmapped area might result in MREMAP_FIXED not
	 * being obeyed.
	 */
	if (!file || !file->f_op->get_unmapped_area)
		return true;
	/* Known good. */
	if (vma_is_shmem(vma))
		return true;
	if (is_vm_hugetlb_page(vma))
		return true;
	if (file->f_op->get_unmapped_area == thp_get_unmapped_area)
		return true;

	return false;
}

static int check_prep_vma(struct vma_remap_struct *vrm)
{
	struct vm_area_struct *vma = vrm->vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = vrm->addr;
	unsigned long old_len, new_len, pgoff;

	if (!vma)
		return -EFAULT;

	/* If mseal()'d, mremap() is prohibited. */
	if (vma_is_sealed(vma))
		return -EPERM;

	/* Align to hugetlb page size, if required. */
	if (is_vm_hugetlb_page(vma) && !align_hugetlb(vrm))
		return -EINVAL;

	vrm_set_delta(vrm);
	vrm->remap_type = vrm_remap_type(vrm);
	/* For convenience, we set new_addr even if VMA won't move. */
	if (!vrm_implies_new_addr(vrm))
		vrm->new_addr = addr;

	/* Below only meaningful if we expand or move a VMA. */
	if (!vrm_will_map_new(vrm))
		return 0;

	old_len = vrm->old_len;
	new_len = vrm->new_len;

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping. This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original. This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original. There are no known use cases for this
	 * behavior. As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n",
			     current->comm, current->pid);
		return -EINVAL;
	}

	if ((vrm->flags & MREMAP_DONTUNMAP) &&
	    (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
		return -EINVAL;

	/*
	 * We permit crossing of boundaries for the range being unmapped due to
	 * a shrink.
	 */
	if (vrm->remap_type == MREMAP_SHRINK)
		old_len = new_len;

	/*
	 * We can't remap across the end of VMAs, as another VMA may be
	 * adjacent:
	 *
	 *       addr   vma->vm_end
	 *  |-----.----------|
	 *  |     .          |
	 *  |-----.----------|
	 *        .<--------->xxx>
	 *            old_len
	 *
	 * We also require that vma->vm_start <= addr < vma->vm_end.
	 */
	if (old_len > vma->vm_end - addr)
		return -EFAULT;

	if (new_len == old_len)
		return 0;

	/* We are expanding and the VMA is mlock()'d so we need to populate. */
	if (vma->vm_flags & VM_LOCKED)
		vrm->populate_expand = true;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return -EINVAL;

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return -EFAULT;

	if (!mlock_future_ok(mm, vma->vm_flags & VM_LOCKED, vrm->delta))
		return -EAGAIN;

	if (!may_expand_vm(mm, vma->vm_flags, vrm->delta >> PAGE_SHIFT))
		return -ENOMEM;

	return 0;
}

/*
 * Are the parameters passed to mremap() valid? If so return 0, otherwise return
 * error.
 */
static unsigned long check_mremap_params(struct vma_remap_struct *vrm)
{
	unsigned long addr = vrm->addr;
	unsigned long flags = vrm->flags;

	/* Ensure no unexpected flag values. */
	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return -EINVAL;

	/* Start address must be page-aligned. */
	if (offset_in_page(addr))
		return -EINVAL;

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!vrm->new_len)
		return -EINVAL;

	/* Is the new length silly? */
	if (vrm->new_len > TASK_SIZE)
		return -EINVAL;

	/* Remainder of checks are for cases with specific new_addr. */
	if (!vrm_implies_new_addr(vrm))
		return 0;

	/* Is the new address silly? */
	if (vrm->new_addr > TASK_SIZE - vrm->new_len)
		return -EINVAL;

	/* The new address must be page-aligned. */
	if (offset_in_page(vrm->new_addr))
		return -EINVAL;

	/* A fixed address implies a move. */
	if (!(flags & MREMAP_MAYMOVE))
		return -EINVAL;

	/* MREMAP_DONTUNMAP does not allow resizing in the process. */
	if (flags & MREMAP_DONTUNMAP && vrm->old_len != vrm->new_len)
		return -EINVAL;

	/* Target VMA must not overlap source VMA. */
	if (vrm_overlaps(vrm))
		return -EINVAL;

	return 0;
}
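
/*
 * To illustrate the rules above from userspace (illustrative sketch, not
 * kernel code):
 *
 *	// Valid: in-place resize, no new address implied.
 *	mremap(old, 4096, 8192, 0);
 *	// Valid: a fixed move, MREMAP_FIXED requires MREMAP_MAYMOVE.
 *	mremap(old, 4096, 4096, MREMAP_MAYMOVE | MREMAP_FIXED, new);
 *	// -EINVAL: MREMAP_DONTUNMAP forbids resizing (old_len != new_len).
 *	mremap(old, 4096, 8192, MREMAP_MAYMOVE | MREMAP_DONTUNMAP, new);
 *	// -EINVAL: destination overlaps the source, per vrm_overlaps().
 *	mremap(old, 8192, 8192, MREMAP_MAYMOVE | MREMAP_FIXED, old + 4096);
 */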

static unsigned long remap_move(struct vma_remap_struct *vrm)
{
	struct vm_area_struct *vma;
	unsigned long start = vrm->addr;
	unsigned long end = vrm->addr + vrm->old_len;
	unsigned long new_addr = vrm->new_addr;
	unsigned long target_addr = new_addr;
	unsigned long res = -EFAULT;
	unsigned long last_end;
	bool seen_vma = false;

	VMA_ITERATOR(vmi, current->mm, start);

	/*
	 * When moving VMAs we allow for batched moves across multiple VMAs,
	 * with all VMAs in the input range [addr, addr + old_len) being moved
	 * (and split as necessary).
	 */
	for_each_vma_range(vmi, vma, end) {
		/* Account for start, end not aligned with VMA start, end. */
		unsigned long addr = max(vma->vm_start, start);
		unsigned long len = min(end, vma->vm_end) - addr;
		unsigned long offset, res_vma;
		bool multi_allowed;

		/* No gap permitted at the start of the range. */
		if (!seen_vma && start < vma->vm_start)
			return -EFAULT;

		/*
		 * To sensibly move multiple VMAs, accounting for the fact that
		 * get_unmapped_area() may align even MAP_FIXED moves, we simply
		 * attempt to move such that the gaps between source VMAs remain
		 * consistent in destination VMAs, e.g.:
		 *
		 *           X        Y                        X        Y
		 *         <--->    <->                      <--->    <->
		 * |-------|   |-----|  |-----|      |-------|   |-----|  |-----|
		 * |   A   |   |  B  |  |  C  | ---> |   A'  |   |  B' |  |  C' |
		 * |-------|   |-----|  |-----|      |-------|   |-----|  |-----|
		 *                                new_addr
		 *
		 * So we map B' at A'->vm_end + X, and C' at B'->vm_end + Y.
		 */
		offset = seen_vma ? vma->vm_start - last_end : 0;
		last_end = vma->vm_end;

		vrm->vma = vma;
		vrm->addr = addr;
		vrm->new_addr = target_addr + offset;
		vrm->old_len = vrm->new_len = len;

		multi_allowed = vma_multi_allowed(vma);
		if (!multi_allowed) {
			/* This is not the first VMA, abort immediately. */
			if (seen_vma)
				return -EFAULT;
			/* This is the first, but there are more, abort. */
			if (vma->vm_end < end)
				return -EFAULT;
		}

		res_vma = check_prep_vma(vrm);
		if (!res_vma)
			res_vma = mremap_to(vrm);
		if (IS_ERR_VALUE(res_vma))
			return res_vma;

		if (!seen_vma) {
			VM_WARN_ON_ONCE(multi_allowed && res_vma != new_addr);
			res = res_vma;
		}

		/* mmap lock is only dropped on shrink. */
		VM_WARN_ON_ONCE(!vrm->mmap_locked);
		/* This is a move, no expand should occur. */
		VM_WARN_ON_ONCE(vrm->populate_expand);

		if (vrm->vmi_needs_invalidate) {
			vma_iter_invalidate(&vmi);
			vrm->vmi_needs_invalidate = false;
		}
		seen_vma = true;
		target_addr = res_vma + vrm->new_len;
	}

	return res;
}

static unsigned long do_mremap(struct vma_remap_struct *vrm)
{
	struct mm_struct *mm = current->mm;
	unsigned long res;
	bool failed;

	vrm->old_len = PAGE_ALIGN(vrm->old_len);
	vrm->new_len = PAGE_ALIGN(vrm->new_len);

	res = check_mremap_params(vrm);
	if (res)
		return res;

	if (mmap_write_lock_killable(mm))
		return -EINTR;
	vrm->mmap_locked = true;

	if (!check_map_count_against_split_early()) {
		mmap_write_unlock(mm);
		return -ENOMEM;
	}

	if (vrm_move_only(vrm)) {
		res = remap_move(vrm);
	} else {
		vrm->vma = vma_lookup(current->mm, vrm->addr);
		res = check_prep_vma(vrm);
		if (res)
			goto out;

		/* Actually execute mremap. */
		res = vrm_implies_new_addr(vrm) ? mremap_to(vrm) : mremap_at(vrm);
	}

out:
	failed = IS_ERR_VALUE(res);

	if (vrm->mmap_locked)
		mmap_write_unlock(mm);

	/* VMA mlock'd + was expanded, so populate expanded region. */
	if (!failed && vrm->populate_expand)
		mm_populate(vrm->new_addr + vrm->old_len, vrm->delta);

	notify_uffd(vrm, failed);
	return res;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);
	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arch/arm64/tagged-address-abi.rst for more
	 * information.
	 */
	struct vma_remap_struct vrm = {
		.addr = untagged_addr(addr),
		.old_len = old_len,
		.new_len = new_len,
		.flags = flags,
		.new_addr = new_addr,

		.uf = &uf,
		.uf_unmap_early = &uf_unmap_early,
		.uf_unmap = &uf_unmap,

		.remap_type = MREMAP_INVALID, /* We set later. */
	};

	return do_mremap(&vrm);
}
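
/*
 * A minimal userspace sketch of the syscall in action (illustrative only,
 * not kernel code): grow an anonymous mapping, letting the kernel relocate
 * it if it cannot be expanded in place.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, 4096, 2 * 4096, MREMAP_MAYMOVE);
 *	// q may equal p (in-place expansion via expand_vma()) or be a new
 *	// address (the move_vma() path); MAP_FAILED indicates an error.
 */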