// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/userfaultfd.c
 *
 * Copyright (C) 2015 Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "internal.h"

static __always_inline
bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)
{
	/* Make sure that the dst range is fully within dst_vma. */
	if (dst_end > dst_vma->vm_end)
		return false;

	/*
	 * Check the vma is registered in uffd, this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration
	 * time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return false;

	return true;
}

static __always_inline
struct vm_area_struct *find_vma_and_prepare_anon(struct mm_struct *mm,
						 unsigned long addr)
{
	struct vm_area_struct *vma;

	mmap_assert_locked(mm);
	vma = vma_lookup(mm, addr);
	if (!vma)
		vma = ERR_PTR(-ENOENT);
	else if (!(vma->vm_flags & VM_SHARED) &&
		 unlikely(anon_vma_prepare(vma)))
		vma = ERR_PTR(-ENOMEM);

	return vma;
}

#ifdef CONFIG_PER_VMA_LOCK
/*
 * uffd_lock_vma() - Lookup and lock vma corresponding to @address.
 * @mm: mm to search vma in.
 * @address: address that the vma should contain.
 *
 * Should be called without holding mmap_lock.
 *
 * Return: A locked vma containing @address, -ENOENT if no vma is found, or
 * -ENOMEM if anon_vma couldn't be allocated.
 */
static struct vm_area_struct *uffd_lock_vma(struct mm_struct *mm,
					    unsigned long address)
{
	struct vm_area_struct *vma;

	vma = lock_vma_under_rcu(mm, address);
	if (vma) {
		/*
		 * We know we're going to need to use anon_vma, so check
		 * that early.
		 */
		if (!(vma->vm_flags & VM_SHARED) && unlikely(!vma->anon_vma))
			vma_end_read(vma);
		else
			return vma;
	}

	mmap_read_lock(mm);
	vma = find_vma_and_prepare_anon(mm, address);
	if (!IS_ERR(vma)) {
		/*
		 * We cannot use vma_start_read() as it may fail due to
		 * false locked (see comment in vma_start_read()). We
		 * can avoid that by directly locking vm_lock under
		 * mmap_lock, which guarantees that nobody can lock the
		 * vma for write (vma_start_write()) under us.
		 */
		down_read(&vma->vm_lock->lock);
	}

	mmap_read_unlock(mm);
	return vma;
}

static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long len)
{
	struct vm_area_struct *dst_vma;

	dst_vma = uffd_lock_vma(dst_mm, dst_start);
	if (IS_ERR(dst_vma) || validate_dst_vma(dst_vma, dst_start + len))
		return dst_vma;

	vma_end_read(dst_vma);
	return ERR_PTR(-ENOENT);
}

static void uffd_mfill_unlock(struct vm_area_struct *vma)
{
	vma_end_read(vma);
}

#else

static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long len)
{
	struct vm_area_struct *dst_vma;

	mmap_read_lock(dst_mm);
	dst_vma = find_vma_and_prepare_anon(dst_mm, dst_start);
	if (IS_ERR(dst_vma))
		goto out_unlock;

	if (validate_dst_vma(dst_vma, dst_start + len))
		return dst_vma;

	dst_vma = ERR_PTR(-ENOENT);
out_unlock:
	mmap_read_unlock(dst_mm);
	return dst_vma;
}

static void uffd_mfill_unlock(struct vm_area_struct *vma)
{
	mmap_read_unlock(vma->vm_mm);
}
#endif

/* Check if dst_addr is outside of file's size. Must be called with ptl held. */
static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
				 unsigned long dst_addr)
{
	struct inode *inode;
	pgoff_t offset, max_off;

	if (!dst_vma->vm_file)
		return false;

	inode = dst_vma->vm_file->f_inode;
	offset = linear_page_index(dst_vma, dst_addr);
	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	return offset >= max_off;
}

/*
 * Install PTEs, to map dst_addr (within dst_vma) to page.
 *
 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 * and anon, and for both shared and private VMAs.
 */
int mfill_atomic_install_pte(pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, uffd_flags_t flags)
{
	int ret;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	pte_t _dst_pte, *dst_pte;
	bool writable = dst_vma->vm_flags & VM_WRITE;
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
	spinlock_t *ptl;
	struct folio *folio = page_folio(page);
	bool page_in_cache = folio_mapping(folio);

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	_dst_pte = pte_mkdirty(_dst_pte);
	if (page_in_cache && !vm_shared)
		writable = false;
	if (writable)
		_dst_pte = pte_mkwrite(_dst_pte, dst_vma);
	if (flags & MFILL_ATOMIC_WP)
		_dst_pte = pte_mkuffd_wp(_dst_pte);

	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;

	if (mfill_file_over_size(dst_vma, dst_addr)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	ret = -EEXIST;
	/*
	 * We allow to overwrite a pte marker: consider when both MISSING|WP
	 * registered, we firstly wr-protect a none pte which has no page cache
	 * page backing it, then access the page.
	 */
	if (!pte_none_mostly(ptep_get(dst_pte)))
		goto out_unlock;

	if (page_in_cache) {
		/* Usually, cache pages are already added to LRU */
		if (newly_allocated)
			folio_add_lru(folio);
		folio_add_file_rmap_pte(folio, page, dst_vma);
	} else {
		folio_add_new_anon_rmap(folio, dst_vma, dst_addr, RMAP_EXCLUSIVE);
		folio_add_lru_vma(folio, dst_vma);
	}

	/*
	 * Must happen after rmap, as mm_counter() checks mapping (via
	 * PageAnon()), which is set by __page_set_anon_rmap().
	 */
	inc_mm_counter(dst_mm, mm_counter(folio));

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}

static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
				 struct vm_area_struct *dst_vma,
				 unsigned long dst_addr,
				 unsigned long src_addr,
				 uffd_flags_t flags,
				 struct folio **foliop)
{
	void *kaddr;
	int ret;
	struct folio *folio;

	if (!*foliop) {
		ret = -ENOMEM;
		folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
					dst_addr);
		if (!folio)
			goto out;

		kaddr = kmap_local_folio(folio, 0);
		/*
		 * The read mmap_lock is held here. Despite the
		 * mmap_lock being read recursive a deadlock is still
		 * possible if a writer has taken a lock. For example:
		 *
		 * process A thread 1 takes read lock on own mmap_lock
		 * process A thread 2 calls mmap, blocks taking write lock
		 * process B thread 1 takes page fault, read lock on own mmap lock
		 * process B thread 2 calls mmap, blocks taking write lock
		 * process A thread 1 blocks taking read lock on process B
		 * process B thread 1 blocks taking read lock on process A
		 *
		 * Disable page faults to prevent potential deadlock
		 * and retry the copy outside the mmap_lock.
		 */
		pagefault_disable();
		ret = copy_from_user(kaddr, (const void __user *) src_addr,
				     PAGE_SIZE);
		pagefault_enable();
		kunmap_local(kaddr);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*foliop = folio;
			/* don't free the page */
			goto out;
		}

		flush_dcache_folio(folio);
	} else {
		folio = *foliop;
		*foliop = NULL;
	}

	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__folio_mark_uptodate(folio);

	ret = -ENOMEM;
	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
		goto out_release;

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       &folio->page, true, flags);
	if (ret)
		goto out_release;
out:
	return ret;
out_release:
	folio_put(folio);
	goto out;
}

static int mfill_atomic_pte_zeroed_folio(pmd_t *dst_pmd,
					 struct vm_area_struct *dst_vma,
					 unsigned long dst_addr)
{
	struct folio *folio;
	int ret = -ENOMEM;

	folio = vma_alloc_zeroed_movable_folio(dst_vma, dst_addr);
	if (!folio)
		return ret;

	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
		goto out_put;

	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * zeroing out the folio becomes visible before mapping the page
	 * using set_pte_at(). See do_anonymous_page().
	 */
	__folio_mark_uptodate(folio);

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       &folio->page, true, 0);
	if (ret)
		goto out_put;

	return 0;
out_put:
	folio_put(folio);
	return ret;
}

static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;

	if (mm_forbids_zeropage(dst_vma->vm_mm))
		return mfill_atomic_pte_zeroed_folio(dst_pmd, dst_vma, dst_addr);

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;
	if (mfill_file_over_size(dst_vma, dst_addr)) {
		ret = -EFAULT;
		goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(ptep_get(dst_pte)))
		goto out_unlock;
	set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}

/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr,
				     uffd_flags_t flags)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	struct folio *folio;
	struct page *page;
	int ret;

	ret = shmem_get_folio(inode, pgoff, 0, &folio, SGP_NOALLOC);
	/* Our caller expects us to return -EFAULT if we failed to find folio */
	if (ret == -ENOENT)
		ret = -EFAULT;
	if (ret)
		goto out;
	if (!folio) {
		ret = -EFAULT;
		goto out;
	}

	page = folio_file_page(folio, pgoff);
	if (PageHWPoison(page)) {
		ret = -EIO;
		goto out_release;
	}

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       page, false, flags);
	if (ret)
		goto out_release;

	folio_unlock(folio);
	ret = 0;
out:
	return ret;
out_release:
	folio_unlock(folio);
	folio_put(folio);
	goto out;
}

/* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
static int mfill_atomic_pte_poison(pmd_t *dst_pmd,
				   struct vm_area_struct *dst_vma,
				   unsigned long dst_addr,
				   uffd_flags_t flags)
{
	int ret;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;

	_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;

	if (mfill_file_over_size(dst_vma, dst_addr)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	ret = -EEXIST;
	/* Refuse to overwrite any PTE, even a PTE marker (e.g. UFFD WP). */
	if (!pte_none(ptep_get(dst_pte)))
		goto out_unlock;

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}

static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that we didn't run this because the pmd was
	 * missing, the *pmd may be already established and in
	 * turn it may also be a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * mfill_atomic processing for HUGETLB vmas. Note that this routine is
 * called with either vma-lock or mmap_lock held, it will release the lock
 * before returning.
 */
static __always_inline ssize_t mfill_atomic_hugetlb(
					      struct userfaultfd_ctx *ctx,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      uffd_flags_t flags)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb. A PMD_SIZE huge page may exist as used
	 * by THP. Since we can not reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
		up_read(&ctx->map_changing_lock);
		uffd_mfill_unlock(dst_vma);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	folio = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set. If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must lookup again.
	 */
	if (!dst_vma) {
		dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
		if (IS_ERR(dst_vma)) {
			err = PTR_ERR(dst_vma);
			goto out;
		}

		err = -ENOENT;
		if (!is_vm_hugetlb_page(dst_vma))
			goto out_unlock_vma;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock_vma;

		/*
		 * If memory mappings are changing because of non-cooperative
		 * operation (e.g. mremap) running in parallel, bail out and
		 * request the user to retry later
		 */
		down_read(&ctx->map_changing_lock);
		err = -EAGAIN;
		if (atomic_read(&ctx->mmap_changing))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via vma_lock and hugetlb_fault_mutex.
		 * vma_lock ensures the dst_pte remains valid even
		 * in the case of shared pmds. fault mutex prevents
		 * races with other faulting threads.
		 */
		idx = linear_page_index(dst_vma, dst_addr);
		mapping = dst_vma->vm_file->f_mapping;
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);
		hugetlb_vma_lock_read(dst_vma);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
		    !huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
			err = -EEXIST;
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
					       src_addr, flags, &folio);

		hugetlb_vma_unlock_read(dst_vma);
		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			up_read(&ctx->map_changing_lock);
			uffd_mfill_unlock(dst_vma);
			BUG_ON(!folio);

			err = copy_folio_from_user(folio,
						   (const void __user *)src_addr, true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(folio);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&ctx->map_changing_lock);
out_unlock_vma:
	uffd_mfill_unlock(dst_vma);
out:
	if (folio)
		folio_put(folio);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t mfill_atomic_hugetlb(struct userfaultfd_ctx *ctx,
				    struct vm_area_struct *dst_vma,
				    unsigned long dst_start,
				    unsigned long src_start,
				    unsigned long len,
				    uffd_flags_t flags);
#endif /* CONFIG_HUGETLB_PAGE */

static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						uffd_flags_t flags,
						struct folio **foliop)
{
	ssize_t err;

	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
		return mfill_atomic_pte_continue(dst_pmd, dst_vma,
						 dst_addr, flags);
	} else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
		return mfill_atomic_pte_poison(dst_pmd, dst_vma,
					       dst_addr, flags);
	}

	/*
	 * The normal page fault path for a shmem will invoke the
	 * fault, fill the hole in the file and COW it right away. The
	 * result generates plain anonymous memory. So when we are
	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole. For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
			err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
						    dst_addr, src_addr,
						    flags, foliop);
		else
			err = mfill_atomic_pte_zeropage(dst_pmd,
							dst_vma, dst_addr);
	} else {
		err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
					     dst_addr, src_addr,
					     flags, foliop);
	}

	return err;
}

static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
					    unsigned long dst_start,
					    unsigned long src_start,
					    unsigned long len,
					    uffd_flags_t flags)
{
	struct mm_struct *dst_mm = ctx->mm;
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	folio = NULL;
retry:
	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
	if (IS_ERR(dst_vma)) {
		err = PTR_ERR(dst_vma);
		goto out;
	}

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	down_read(&ctx->map_changing_lock);
	err = -EAGAIN;
	if (atomic_read(&ctx->mmap_changing))
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return mfill_atomic_hugetlb(ctx, dst_vma, dst_start,
					    src_start, len, flags);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;
	if (!vma_is_shmem(dst_vma) &&
	    uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmdp_get_lockless(dst_pmd);
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		dst_pmdval = pmdp_get_lockless(dst_pmd);
		/*
		 * If the dst_pmd is THP don't override it and just be strict.
		 * (This includes the case where the PMD used to be THP and
		 * changed back to none after __pte_alloc().)
		 */
		if (unlikely(!pmd_present(dst_pmdval) || pmd_trans_huge(dst_pmdval) ||
			     pmd_devmap(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_bad(dst_pmdval))) {
			err = -EFAULT;
			break;
		}
		/*
		 * For shmem mappings, khugepaged is allowed to remove page
		 * tables under us; pte_offset_map_lock() will deal with that.
		 */

		err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
				       src_addr, flags, &folio);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *kaddr;

			up_read(&ctx->map_changing_lock);
			uffd_mfill_unlock(dst_vma);
			BUG_ON(!folio);

			kaddr = kmap_local_folio(folio, 0);
			err = copy_from_user(kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap_local(kaddr);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			flush_dcache_folio(folio);
			goto retry;
		} else
			BUG_ON(folio);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&ctx->map_changing_lock);
	uffd_mfill_unlock(dst_vma);
out:
	if (folio)
		folio_put(folio);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
			  unsigned long src_start, unsigned long len,
			  uffd_flags_t flags)
{
	return mfill_atomic(ctx, dst_start, src_start, len,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY));
}

ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
			      unsigned long start,
			      unsigned long len)
{
	return mfill_atomic(ctx, start, 0, len,
			    uffd_flags_set_mode(0, MFILL_ATOMIC_ZEROPAGE));
}

ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long start,
			      unsigned long len, uffd_flags_t flags)
{

	/*
	 * A caller might reasonably assume that UFFDIO_CONTINUE contains an
	 * smp_wmb() to ensure that any writes to the about-to-be-mapped page by
	 * the thread doing the UFFDIO_CONTINUE are guaranteed to be visible to
	 * subsequent loads from the page through the newly mapped address range.
	 */
	smp_wmb();

	return mfill_atomic(ctx, start, 0, len,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
}

ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,
			    unsigned long len, uffd_flags_t flags)
{
	return mfill_atomic(ctx, start, 0, len,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_POISON));
}

long uffd_wp_range(struct vm_area_struct *dst_vma,
		   unsigned long start, unsigned long len, bool enable_wp)
{
	unsigned int mm_cp_flags;
	struct mmu_gather tlb;
	long ret;

	VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end,
			"The address range exceeds VMA boundary.\n");
	if (enable_wp)
		mm_cp_flags = MM_CP_UFFD_WP;
	else
		mm_cp_flags = MM_CP_UFFD_WP_RESOLVE;

	/*
	 * vma->vm_page_prot already reflects that uffd-wp is enabled for this
	 * VMA (see userfaultfd_set_vm_flags()) and that all PTEs are supposed
	 * to be write-protected as default whenever protection changes.
	 * Try upgrading write permissions manually.
	 */
	if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
	tlb_gather_mmu(&tlb, dst_vma->vm_mm);
	ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
	tlb_finish_mmu(&tlb);

	return ret;
}

int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start,
			unsigned long len, bool enable_wp)
{
	struct mm_struct *dst_mm = ctx->mm;
	unsigned long end = start + len;
	unsigned long _start, _end;
	struct vm_area_struct *dst_vma;
	unsigned long page_mask;
	long err;
	VMA_ITERATOR(vmi, dst_mm, start);

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(start + len <= start);

	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	down_read(&ctx->map_changing_lock);
	err = -EAGAIN;
	if (atomic_read(&ctx->mmap_changing))
		goto out_unlock;

	err = -ENOENT;
	for_each_vma_range(vmi, dst_vma, end) {

		if (!userfaultfd_wp(dst_vma)) {
			err = -ENOENT;
			break;
		}

		if (is_vm_hugetlb_page(dst_vma)) {
			err = -EINVAL;
			page_mask = vma_kernel_pagesize(dst_vma) - 1;
			if ((start & page_mask) || (len & page_mask))
				break;
		}

		_start = max(dst_vma->vm_start, start);
		_end = min(dst_vma->vm_end, end);

		err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp);

		/* Return 0 on success, <0 on failures */
		if (err < 0)
			break;
		err = 0;
	}
out_unlock:
	up_read(&ctx->map_changing_lock);
	mmap_read_unlock(dst_mm);
	return err;
}


void double_pt_lock(spinlock_t *ptl1,
		    spinlock_t *ptl2)
	__acquires(ptl1)
	__acquires(ptl2)
{
	if (ptl1 > ptl2)
		swap(ptl1, ptl2);
	/* lock in virtual address order to avoid lock inversion */
	spin_lock(ptl1);
	if (ptl1 != ptl2)
		spin_lock_nested(ptl2, SINGLE_DEPTH_NESTING);
	else
		__acquire(ptl2);
}

void double_pt_unlock(spinlock_t *ptl1,
		      spinlock_t *ptl2)
	__releases(ptl1)
	__releases(ptl2)
{
	spin_unlock(ptl1);
	if (ptl1 != ptl2)
		spin_unlock(ptl2);
	else
		__release(ptl2);
}


static int move_present_pte(struct mm_struct *mm,
			    struct vm_area_struct *dst_vma,
			    struct vm_area_struct *src_vma,
			    unsigned long dst_addr, unsigned long src_addr,
			    pte_t *dst_pte, pte_t *src_pte,
			    pte_t orig_dst_pte, pte_t orig_src_pte,
			    spinlock_t *dst_ptl, spinlock_t *src_ptl,
			    struct folio *src_folio)
{
	int err = 0;

	double_pt_lock(dst_ptl, src_ptl);

	if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
	    !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
		err = -EAGAIN;
		goto out;
	}
	if (folio_test_large(src_folio) ||
	    folio_maybe_dma_pinned(src_folio) ||
	    !PageAnonExclusive(&src_folio->page)) {
		err = -EBUSY;
		goto out;
	}

	orig_src_pte = ptep_clear_flush(src_vma, src_addr, src_pte);
	/* Folio got pinned from under us. Put it back and fail the move. */
	if (folio_maybe_dma_pinned(src_folio)) {
		set_pte_at(mm, src_addr, src_pte, orig_src_pte);
		err = -EBUSY;
		goto out;
	}

	folio_move_anon_rmap(src_folio, dst_vma);
	src_folio->index = linear_page_index(dst_vma, dst_addr);

	orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
	/* Follow mremap() behavior and treat the entry dirty after the move */
	orig_dst_pte = pte_mkwrite(pte_mkdirty(orig_dst_pte), dst_vma);

	set_pte_at(mm, dst_addr, dst_pte, orig_dst_pte);
out:
	double_pt_unlock(dst_ptl, src_ptl);
	return err;
}

static int move_swap_pte(struct mm_struct *mm,
			 unsigned long dst_addr, unsigned long src_addr,
			 pte_t *dst_pte, pte_t *src_pte,
			 pte_t orig_dst_pte, pte_t orig_src_pte,
			 spinlock_t *dst_ptl, spinlock_t *src_ptl)
{
	if (!pte_swp_exclusive(orig_src_pte))
		return -EBUSY;

	double_pt_lock(dst_ptl, src_ptl);

	if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
	    !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
		double_pt_unlock(dst_ptl, src_ptl);
		return -EAGAIN;
	}

	orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
	set_pte_at(mm, dst_addr, dst_pte, orig_src_pte);
	double_pt_unlock(dst_ptl, src_ptl);

	return 0;
}

static int move_zeropage_pte(struct mm_struct *mm,
			     struct vm_area_struct *dst_vma,
			     struct vm_area_struct *src_vma,
			     unsigned long dst_addr, unsigned long src_addr,
			     pte_t *dst_pte, pte_t *src_pte,
			     pte_t orig_dst_pte, pte_t orig_src_pte,
			     spinlock_t *dst_ptl, spinlock_t *src_ptl)
{
	pte_t zero_pte;

	double_pt_lock(dst_ptl, src_ptl);
	if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
	    !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
		double_pt_unlock(dst_ptl, src_ptl);
		return -EAGAIN;
	}

	zero_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	ptep_clear_flush(src_vma, src_addr, src_pte);
	set_pte_at(mm, dst_addr, dst_pte, zero_pte);
	double_pt_unlock(dst_ptl, src_ptl);

	return 0;
}


/*
 * The mmap_lock for reading is held by the caller. Just move the page
 * from src_pmd to dst_pmd if possible, and return true if succeeded
 * in moving the page.
 */
static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
			  struct vm_area_struct *dst_vma,
			  struct vm_area_struct *src_vma,
			  unsigned long dst_addr, unsigned long src_addr,
			  __u64 mode)
{
	swp_entry_t entry;
	pte_t orig_src_pte, orig_dst_pte;
	pte_t src_folio_pte;
	spinlock_t *src_ptl, *dst_ptl;
	pte_t *src_pte = NULL;
	pte_t *dst_pte = NULL;
	pmd_t dummy_pmdval;
	struct folio *src_folio = NULL;
	struct anon_vma *src_anon_vma = NULL;
	struct mmu_notifier_range range;
	int err = 0;

	flush_cache_range(src_vma, src_addr, src_addr + PAGE_SIZE);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				src_addr, src_addr + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);
retry:
	/*
	 * Use the maywrite version to indicate that dst_pte will be modified,
	 * but since we will use pte_same() to detect the change of the pte
	 * entry, there is no need to get pmdval, so just pass a dummy variable
	 * to it.
	 */
	dst_pte = pte_offset_map_rw_nolock(mm, dst_pmd, dst_addr, &dummy_pmdval,
					   &dst_ptl);

	/* Retry if a huge pmd materialized from under us */
	if (unlikely(!dst_pte)) {
		err = -EAGAIN;
		goto out;
	}

	/* same as dst_pte */
	src_pte = pte_offset_map_rw_nolock(mm, src_pmd, src_addr, &dummy_pmdval,
					   &src_ptl);

	/*
	 * We held the mmap_lock for reading so MADV_DONTNEED
	 * can zap transparent huge pages under us, or the
	 * transparent huge page fault can establish new
	 * transparent huge pages under us.
	 */
	if (unlikely(!src_pte)) {
		err = -EAGAIN;
		goto out;
	}

	/* Sanity checks before the operation */
	if (WARN_ON_ONCE(pmd_none(*dst_pmd)) || WARN_ON_ONCE(pmd_none(*src_pmd)) ||
	    WARN_ON_ONCE(pmd_trans_huge(*dst_pmd)) || WARN_ON_ONCE(pmd_trans_huge(*src_pmd))) {
		err = -EINVAL;
		goto out;
	}

	spin_lock(dst_ptl);
	orig_dst_pte = ptep_get(dst_pte);
	spin_unlock(dst_ptl);
	if (!pte_none(orig_dst_pte)) {
		err = -EEXIST;
		goto out;
	}

	spin_lock(src_ptl);
	orig_src_pte = ptep_get(src_pte);
	spin_unlock(src_ptl);
	if (pte_none(orig_src_pte)) {
		if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES))
			err = -ENOENT;
		else /* nothing to do to move a hole */
			err = 0;
		goto out;
	}

	/* If PTE changed after we locked the folio then start over */
	if (src_folio && unlikely(!pte_same(src_folio_pte, orig_src_pte))) {
		err = -EAGAIN;
		goto out;
	}

	if (pte_present(orig_src_pte)) {
		if (is_zero_pfn(pte_pfn(orig_src_pte))) {
			err = move_zeropage_pte(mm, dst_vma, src_vma,
					       dst_addr, src_addr, dst_pte, src_pte,
					       orig_dst_pte, orig_src_pte,
					       dst_ptl, src_ptl);
			goto out;
		}

		/*
		 * Pin and lock both source folio and anon_vma. Since we are in
		 * RCU read section, we can't block, so on contention have to
		 * unmap the ptes, obtain the lock and retry.
		 */
		if (!src_folio) {
			struct folio *folio;

			/*
			 * Pin the page while holding the lock to be sure the
			 * page isn't freed under us
			 */
			spin_lock(src_ptl);
			if (!pte_same(orig_src_pte, ptep_get(src_pte))) {
				spin_unlock(src_ptl);
				err = -EAGAIN;
				goto out;
			}

			folio = vm_normal_folio(src_vma, src_addr, orig_src_pte);
			if (!folio || !PageAnonExclusive(&folio->page)) {
				spin_unlock(src_ptl);
				err = -EBUSY;
				goto out;
			}

			folio_get(folio);
			src_folio = folio;
			src_folio_pte = orig_src_pte;
			spin_unlock(src_ptl);

			if (!folio_trylock(src_folio)) {
				pte_unmap(&orig_src_pte);
				pte_unmap(&orig_dst_pte);
				src_pte = dst_pte = NULL;
				/* now we can block and wait */
				folio_lock(src_folio);
				goto retry;
			}

			if (WARN_ON_ONCE(!folio_test_anon(src_folio))) {
				err = -EBUSY;
				goto out;
			}
		}

		/* at this point we have src_folio locked */
		if (folio_test_large(src_folio)) {
			/* split_folio() can block */
			pte_unmap(&orig_src_pte);
			pte_unmap(&orig_dst_pte);
			src_pte = dst_pte = NULL;
			err = split_folio(src_folio);
			if (err)
				goto out;
			/* have to reacquire the folio after it got split */
			folio_unlock(src_folio);
			folio_put(src_folio);
			src_folio = NULL;
			goto retry;
		}

		if (!src_anon_vma) {
			/*
			 * folio_referenced walks the anon_vma chain
			 * without the folio lock. Serialize against it with
			 * the anon_vma lock, the folio lock is not enough.
			 */
			src_anon_vma = folio_get_anon_vma(src_folio);
			if (!src_anon_vma) {
				/* page was unmapped from under us */
				err = -EAGAIN;
				goto out;
			}
			if (!anon_vma_trylock_write(src_anon_vma)) {
				pte_unmap(&orig_src_pte);
				pte_unmap(&orig_dst_pte);
				src_pte = dst_pte = NULL;
				/* now we can block and wait */
				anon_vma_lock_write(src_anon_vma);
				goto retry;
			}
		}

		err = move_present_pte(mm, dst_vma, src_vma,
				       dst_addr, src_addr, dst_pte, src_pte,
				       orig_dst_pte, orig_src_pte,
				       dst_ptl, src_ptl, src_folio);
	} else {
		entry = pte_to_swp_entry(orig_src_pte);
		if (non_swap_entry(entry)) {
			if (is_migration_entry(entry)) {
				pte_unmap(&orig_src_pte);
				pte_unmap(&orig_dst_pte);
				src_pte = dst_pte = NULL;
				migration_entry_wait(mm, src_pmd, src_addr);
				err = -EAGAIN;
			} else
				err = -EFAULT;
			goto out;
		}

		err = move_swap_pte(mm, dst_addr, src_addr,
				    dst_pte, src_pte,
				    orig_dst_pte, orig_src_pte,
				    dst_ptl, src_ptl);
	}

out:
	if (src_anon_vma) {
		anon_vma_unlock_write(src_anon_vma);
		put_anon_vma(src_anon_vma);
	}
	if (src_folio) {
		folio_unlock(src_folio);
		folio_put(src_folio);
	}
	if (dst_pte)
		pte_unmap(dst_pte);
	if (src_pte)
		pte_unmap(src_pte);
	mmu_notifier_invalidate_range_end(&range);

	return err;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline bool move_splits_huge_pmd(unsigned long dst_addr,
					unsigned long src_addr,
					unsigned long src_end)
{
	return (src_addr & ~HPAGE_PMD_MASK) || (dst_addr & ~HPAGE_PMD_MASK) ||
		src_end - src_addr < HPAGE_PMD_SIZE;
}
#else
static inline bool move_splits_huge_pmd(unsigned long dst_addr,
					unsigned long src_addr,
					unsigned long src_end)
{
	/* This is unreachable anyway, just to avoid warnings when HPAGE_PMD_SIZE==0 */
	return false;
}
#endif

static inline bool vma_move_compatible(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_PFNMAP | VM_IO | VM_HUGETLB |
				  VM_MIXEDMAP | VM_SHADOW_STACK));
}

static int validate_move_areas(struct userfaultfd_ctx *ctx,
			       struct vm_area_struct *src_vma,
			       struct vm_area_struct *dst_vma)
{
	/* Only allow moving if both have the same access and protection */
	if ((src_vma->vm_flags & VM_ACCESS_FLAGS) != (dst_vma->vm_flags & VM_ACCESS_FLAGS) ||
	    pgprot_val(src_vma->vm_page_prot) != pgprot_val(dst_vma->vm_page_prot))
		return -EINVAL;

	/* Only allow moving if both are mlocked or both aren't */
	if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED))
		return -EINVAL;

	/*
	 * For now, we keep it simple and only move between writable VMAs.
	 * Access flags are equal, therefore checking only the source is enough.
	 */
	if (!(src_vma->vm_flags & VM_WRITE))
		return -EINVAL;

	/* Check if vma flags indicate content which can be moved */
	if (!vma_move_compatible(src_vma) || !vma_move_compatible(dst_vma))
		return -EINVAL;

	/* Ensure dst_vma is registered in uffd we are operating on */
	if (!dst_vma->vm_userfaultfd_ctx.ctx ||
	    dst_vma->vm_userfaultfd_ctx.ctx != ctx)
		return -EINVAL;

	/* Only allow moving across anonymous vmas */
	if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma))
		return -EINVAL;

	return 0;
}

static __always_inline
int find_vmas_mm_locked(struct mm_struct *mm,
			unsigned long dst_start,
			unsigned long src_start,
			struct vm_area_struct **dst_vmap,
			struct vm_area_struct **src_vmap)
{
	struct vm_area_struct *vma;

	mmap_assert_locked(mm);
	vma = find_vma_and_prepare_anon(mm, dst_start);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	*dst_vmap = vma;
	/* Skip finding src_vma if src_start is in dst_vma */
	if (src_start >= vma->vm_start && src_start < vma->vm_end)
		goto out_success;

	vma = vma_lookup(mm, src_start);
	if (!vma)
		return -ENOENT;
out_success:
	*src_vmap = vma;
	return 0;
}

#ifdef CONFIG_PER_VMA_LOCK
static int uffd_move_lock(struct mm_struct *mm,
			  unsigned long dst_start,
			  unsigned long src_start,
			  struct vm_area_struct **dst_vmap,
			  struct vm_area_struct **src_vmap)
{
	struct vm_area_struct *vma;
	int err;

	vma = uffd_lock_vma(mm, dst_start);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	*dst_vmap = vma;
	/*
	 * Skip finding src_vma if src_start is in dst_vma. This also ensures
	 * that we don't lock the same vma twice.
	 */
	if (src_start >= vma->vm_start && src_start < vma->vm_end) {
		*src_vmap = vma;
		return 0;
	}

	/*
	 * Using uffd_lock_vma() to get src_vma can lead to following deadlock:
	 *
	 * Thread1				Thread2
	 * -------				-------
	 * vma_start_read(dst_vma)
	 *					mmap_write_lock(mm)
	 *					vma_start_write(src_vma)
	 * vma_start_read(src_vma)
	 * mmap_read_lock(mm)
	 *					vma_start_write(dst_vma)
	 */
	*src_vmap = lock_vma_under_rcu(mm, src_start);
	if (likely(*src_vmap))
		return 0;

	/* Undo any locking and retry in mmap_lock critical section */
	vma_end_read(*dst_vmap);

	mmap_read_lock(mm);
	err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap);
	if (!err) {
		/*
		 * See comment in uffd_lock_vma() as to why not using
		 * vma_start_read() here.
		 */
		down_read(&(*dst_vmap)->vm_lock->lock);
		if (*dst_vmap != *src_vmap)
			down_read_nested(&(*src_vmap)->vm_lock->lock,
					 SINGLE_DEPTH_NESTING);
	}
	mmap_read_unlock(mm);
	return err;
}

static void uffd_move_unlock(struct vm_area_struct *dst_vma,
			     struct vm_area_struct *src_vma)
{
	vma_end_read(src_vma);
	if (src_vma != dst_vma)
		vma_end_read(dst_vma);
}

#else

static int uffd_move_lock(struct mm_struct *mm,
			  unsigned long dst_start,
			  unsigned long src_start,
			  struct vm_area_struct **dst_vmap,
			  struct vm_area_struct **src_vmap)
{
	int err;

	mmap_read_lock(mm);
	err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap);
	if (err)
		mmap_read_unlock(mm);
	return err;
}

static void uffd_move_unlock(struct vm_area_struct *dst_vma,
			     struct vm_area_struct *src_vma)
{
	mmap_assert_locked(src_vma->vm_mm);
	mmap_read_unlock(dst_vma->vm_mm);
}
#endif

/**
 * move_pages - move arbitrary anonymous pages of an existing vma
 * @ctx: pointer to the userfaultfd context
 * @dst_start: start of the destination virtual memory range
 * @src_start: start of the source virtual memory range
 * @len: length of the virtual memory range
 * @mode: flags from uffdio_move.mode
 *
 * It will either use the mmap_lock in read mode or per-vma locks
 *
 * move_pages() remaps arbitrary anonymous pages atomically in zero
 * copy. It only works on non shared anonymous pages because those can
 * be relocated without generating non linear anon_vmas in the rmap
 * code.
 *
 * It provides a zero copy mechanism to handle userspace page faults.
 * The source vma pages should have mapcount == 1, which can be
 * enforced by using madvise(MADV_DONTFORK) on src vma.
 *
 * The thread receiving the page during the userland page fault
 * will receive the faulting page in the source vma through the network,
 * storage or any other I/O device (MADV_DONTFORK in the source vma
 * avoids move_pages() failing with -EBUSY if the process forks before
 * move_pages() is called), then it will call move_pages() to map the
 * page in the faulting address in the destination vma.
 *
 * This userfaultfd command works purely via pagetables, so it's the
 * most efficient way to move physical non shared anonymous pages
 * across different virtual addresses. Unlike mremap()/mmap()/munmap()
 * it does not create any new vmas. The mapping in the destination
 * address is atomic.
1556 * 1557 * It only works if the vma protection bits are identical from the 1558 * source and destination vma. 1559 * 1560 * It can remap non shared anonymous pages within the same vma too. 1561 * 1562 * If the source virtual memory range has any unmapped holes, or if 1563 * the destination virtual memory range is not a whole unmapped hole, 1564 * move_pages() will fail respectively with -ENOENT or -EEXIST. This 1565 * provides a very strict behavior to avoid any chance of memory 1566 * corruption going unnoticed if there are userland race conditions. 1567 * Only one thread should resolve the userland page fault at any given 1568 * time for any given faulting address. This means that if two threads 1569 * try to both call move_pages() on the same destination address at the 1570 * same time, the second thread will get an explicit error from this 1571 * command. 1572 * 1573 * The command retval will return "len" is successful. The command 1574 * however can be interrupted by fatal signals or errors. If 1575 * interrupted it will return the number of bytes successfully 1576 * remapped before the interruption if any, or the negative error if 1577 * none. It will never return zero. Either it will return an error or 1578 * an amount of bytes successfully moved. If the retval reports a 1579 * "short" remap, the move_pages() command should be repeated by 1580 * userland with src+retval, dst+reval, len-retval if it wants to know 1581 * about the error that interrupted it. 1582 * 1583 * The UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES flag can be specified to 1584 * prevent -ENOENT errors to materialize if there are holes in the 1585 * source virtual range that is being remapped. The holes will be 1586 * accounted as successfully remapped in the retval of the 1587 * command. This is mostly useful to remap hugepage naturally aligned 1588 * virtual regions without knowing if there are transparent hugepage 1589 * in the regions or not, but preventing the risk of having to split 1590 * the hugepmd during the remap. 1591 * 1592 * If there's any rmap walk that is taking the anon_vma locks without 1593 * first obtaining the folio lock (the only current instance is 1594 * folio_referenced), they will have to verify if the folio->mapping 1595 * has changed after taking the anon_vma lock. If it changed they 1596 * should release the lock and retry obtaining a new anon_vma, because 1597 * it means the anon_vma was changed by move_pages() before the lock 1598 * could be obtained. This is the only additional complexity added to 1599 * the rmap code to provide this anonymous page remapping functionality. 1600 */ 1601 ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start, 1602 unsigned long src_start, unsigned long len, __u64 mode) 1603 { 1604 struct mm_struct *mm = ctx->mm; 1605 struct vm_area_struct *src_vma, *dst_vma; 1606 unsigned long src_addr, dst_addr; 1607 pmd_t *src_pmd, *dst_pmd; 1608 long err = -EINVAL; 1609 ssize_t moved = 0; 1610 1611 /* Sanitize the command parameters. */ 1612 if (WARN_ON_ONCE(src_start & ~PAGE_MASK) || 1613 WARN_ON_ONCE(dst_start & ~PAGE_MASK) || 1614 WARN_ON_ONCE(len & ~PAGE_MASK)) 1615 goto out; 1616 1617 /* Does the address range wrap, or is the span zero-sized? 
	if (WARN_ON_ONCE(src_start + len <= src_start) ||
	    WARN_ON_ONCE(dst_start + len <= dst_start))
		goto out;

	err = uffd_move_lock(mm, dst_start, src_start, &dst_vma, &src_vma);
	if (err)
		goto out;

	/* Re-check after taking map_changing_lock */
	err = -EAGAIN;
	down_read(&ctx->map_changing_lock);
	if (likely(atomic_read(&ctx->mmap_changing)))
		goto out_unlock;
	/*
	 * Make sure the vma is not shared, that the src and dst remap
	 * ranges are both valid and fully within a single existing
	 * vma.
	 */
	err = -EINVAL;
	if (src_vma->vm_flags & VM_SHARED)
		goto out_unlock;
	if (src_start + len > src_vma->vm_end)
		goto out_unlock;

	if (dst_vma->vm_flags & VM_SHARED)
		goto out_unlock;
	if (dst_start + len > dst_vma->vm_end)
		goto out_unlock;

	err = validate_move_areas(ctx, src_vma, dst_vma);
	if (err)
		goto out_unlock;

	for (src_addr = src_start, dst_addr = dst_start;
	     src_addr < src_start + len;) {
		spinlock_t *ptl;
		pmd_t dst_pmdval;
		unsigned long step_size;

		/*
		 * Below works because anonymous area would not have a
		 * transparent huge PUD. If file-backed support is added,
		 * that case would need to be handled here.
		 */
		src_pmd = mm_find_pmd(mm, src_addr);
		if (unlikely(!src_pmd)) {
			if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
				err = -ENOENT;
				break;
			}
			src_pmd = mm_alloc_pmd(mm, src_addr);
			if (unlikely(!src_pmd)) {
				err = -ENOMEM;
				break;
			}
		}
		dst_pmd = mm_alloc_pmd(mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmdp_get_lockless(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't override it and just
		 * be strict. If dst_pmd changes into THP after this check, the
		 * move_pages_huge_pmd() will detect the change and retry
		 * while move_pages_pte() will detect the change and fail.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}

		ptl = pmd_trans_huge_lock(src_pmd, src_vma);
		if (ptl) {
			if (pmd_devmap(*src_pmd)) {
				spin_unlock(ptl);
				err = -ENOENT;
				break;
			}

			/* Check if we can move the pmd without splitting it. */
			if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) ||
			    !pmd_none(dst_pmdval)) {
				struct folio *folio = pmd_folio(*src_pmd);

				if (!folio || (!is_huge_zero_folio(folio) &&
					       !PageAnonExclusive(&folio->page))) {
					spin_unlock(ptl);
					err = -EBUSY;
					break;
				}

				spin_unlock(ptl);
				split_huge_pmd(src_vma, src_pmd, src_addr);
				/* The folio will be split by move_pages_pte() */
				continue;
			}

			err = move_pages_huge_pmd(mm, dst_pmd, src_pmd,
						  dst_pmdval, dst_vma, src_vma,
						  dst_addr, src_addr);
			step_size = HPAGE_PMD_SIZE;
		} else {
			if (pmd_none(*src_pmd)) {
				if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
					err = -ENOENT;
					break;
				}
				if (unlikely(__pte_alloc(mm, src_pmd))) {
					err = -ENOMEM;
					break;
				}
			}

			if (unlikely(pte_alloc(mm, dst_pmd))) {
				err = -ENOMEM;
				break;
			}

			err = move_pages_pte(mm, dst_pmd, src_pmd,
					     dst_vma, src_vma,
					     dst_addr, src_addr, mode);
			step_size = PAGE_SIZE;
		}

		cond_resched();

		if (fatal_signal_pending(current)) {
			/* Do not override an error */
			if (!err || err == -EAGAIN)
				err = -EINTR;
			break;
		}

		if (err) {
			if (err == -EAGAIN)
				continue;
			break;
		}

		/* Proceed to the next page */
		dst_addr += step_size;
		src_addr += step_size;
		moved += step_size;
	}

out_unlock:
	up_read(&ctx->map_changing_lock);
	uffd_move_unlock(dst_vma, src_vma);
out:
	VM_WARN_ON(moved < 0);
	VM_WARN_ON(err > 0);
	VM_WARN_ON(!moved && !err);
	return moved ? moved : err;
}

static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
				     vm_flags_t flags)
{
	const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;

	vm_flags_reset(vma, flags);
	/*
	 * For shared mappings, we want to enable writenotify while
	 * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
	 * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
	 */
	if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
		vma_set_page_prot(vma);
}

static void userfaultfd_set_ctx(struct vm_area_struct *vma,
				struct userfaultfd_ctx *ctx,
				unsigned long flags)
{
	vma_start_write(vma);
	vma->vm_userfaultfd_ctx = (struct vm_userfaultfd_ctx){ctx};
	userfaultfd_set_vm_flags(vma,
				 (vma->vm_flags & ~__VM_UFFD_FLAGS) | flags);
}

void userfaultfd_reset_ctx(struct vm_area_struct *vma)
{
	userfaultfd_set_ctx(vma, NULL, 0);
}

struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi,
					     struct vm_area_struct *prev,
					     struct vm_area_struct *vma,
					     unsigned long start,
					     unsigned long end)
{
	struct vm_area_struct *ret;

	/* Reset ptes for the whole vma range if wr-protected */
	if (userfaultfd_wp(vma))
		uffd_wp_range(vma, start, end - start, false);

	ret = vma_modify_flags_uffd(vmi, prev, vma, start, end,
				    vma->vm_flags & ~__VM_UFFD_FLAGS,
				    NULL_VM_UFFD_CTX);

	/*
	 * In the vma_merge() successful mprotect-like case 8:
	 * the next vma was merged into the current one and
	 * the current one has not been updated yet.
	 */
	if (!IS_ERR(ret))
		userfaultfd_reset_ctx(ret);

	return ret;
}

/* Assumes mmap write lock taken, and mm_struct pinned. */
int userfaultfd_register_range(struct userfaultfd_ctx *ctx,
			       struct vm_area_struct *vma,
			       unsigned long vm_flags,
			       unsigned long start, unsigned long end,
			       bool wp_async)
{
	VMA_ITERATOR(vmi, ctx->mm, start);
	struct vm_area_struct *prev = vma_prev(&vmi);
	unsigned long vma_end;
	unsigned long new_flags;

	if (vma->vm_start < start)
		prev = vma;

	for_each_vma_range(vmi, vma, end) {
		cond_resched();

		BUG_ON(!vma_can_userfault(vma, vm_flags, wp_async));
		BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
		       vma->vm_userfaultfd_ctx.ctx != ctx);
		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));

		/*
		 * Nothing to do: this vma is already registered into this
		 * userfaultfd and with the right tracking mode too.
		 */
		if (vma->vm_userfaultfd_ctx.ctx == ctx &&
		    (vma->vm_flags & vm_flags) == vm_flags)
			goto skip;

		if (vma->vm_start > start)
			start = vma->vm_start;
		vma_end = min(end, vma->vm_end);

		new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
		vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end,
					    new_flags,
					    (struct vm_userfaultfd_ctx){ctx});
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		/*
		 * In the vma_merge() successful mprotect-like case 8:
		 * the next vma was merged into the current one and
		 * the current one has not been updated yet.
		 */
		userfaultfd_set_ctx(vma, ctx, vm_flags);

		if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
			hugetlb_unshare_all_pmds(vma);

skip:
		prev = vma;
		start = vma->vm_end;
	}

	return 0;
}

void userfaultfd_release_new(struct userfaultfd_ctx *ctx)
{
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	/* the various vma->vm_userfaultfd_ctx still points to it */
	mmap_write_lock(mm);
	for_each_vma(vmi, vma) {
		if (vma->vm_userfaultfd_ctx.ctx == ctx)
			userfaultfd_reset_ctx(vma);
	}
	mmap_write_unlock(mm);
}

void userfaultfd_release_all(struct mm_struct *mm,
			     struct userfaultfd_ctx *ctx)
{
	struct vm_area_struct *vma, *prev;
	VMA_ITERATOR(vmi, mm, 0);

	if (!mmget_not_zero(mm))
		return;

	/*
	 * Flush page faults out of all CPUs. NOTE: all page faults
	 * must be retried without returning VM_FAULT_SIGBUS if
	 * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
	 * changes while handle_userfault released the mmap_lock. So
	 * it's critical that released is set to true (above), before
	 * taking the mmap_lock for writing.
	 */
	mmap_write_lock(mm);
	prev = NULL;
	for_each_vma(vmi, vma) {
		cond_resched();
		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
		       !!(vma->vm_flags & __VM_UFFD_FLAGS));
		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
			prev = vma;
			continue;
		}

		vma = userfaultfd_clear_vma(&vmi, prev, vma,
					    vma->vm_start, vma->vm_end);
		prev = vma;
	}
	mmap_write_unlock(mm);
	mmput(mm);
}