Lines matching +full:wait +full:-retry +full:-us (mm/userfaultfd.c)

1 // SPDX-License-Identifier: GPL-2.0-only
27 if (dst_end > dst_vma->vm_end) in validate_dst_vma()
35 if (!dst_vma->vm_userfaultfd_ctx.ctx) in validate_dst_vma()
50 vma = ERR_PTR(-ENOENT); in find_vma_and_prepare_anon()
51 else if (!(vma->vm_flags & VM_SHARED) && in find_vma_and_prepare_anon()
53 vma = ERR_PTR(-ENOMEM); in find_vma_and_prepare_anon()
60 * uffd_lock_vma() - Lookup and lock vma corresponding to @address.
66 * Return: A locked vma containing @address, -ENOENT if no vma is found, or
67 * -ENOMEM if anon_vma couldn't be allocated.
80 if (!(vma->vm_flags & VM_SHARED) && unlikely(!vma->anon_vma)) in uffd_lock_vma()
94 * vma for write (vma_start_write()) under us. in uffd_lock_vma()
96 down_read(&vma->vm_lock->lock); in uffd_lock_vma()
114 return ERR_PTR(-ENOENT); in uffd_mfill_lock()
138 dst_vma = ERR_PTR(-ENOENT); in uffd_mfill_lock()
146 mmap_read_unlock(vma->vm_mm); in uffd_mfill_unlock()
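
The uffd_lock_vma()/uffd_mfill_lock() fragments above are the kernel-side entry into the fill paths; they are only reached after userspace has created a userfaultfd and completed the UFFDIO_API handshake. As a point of reference, here is a minimal userspace sketch of that setup; the helper name open_uffd() and the empty feature mask are illustrative assumptions, not taken from this file.

    /* Minimal userfaultfd setup sketch (userspace side, illustrative only). */
    #include <fcntl.h>
    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int open_uffd(void)
    {
            /* No glibc wrapper for userfaultfd(2); go through syscall(). */
            int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
            struct uffdio_api api = { .api = UFFD_API, .features = 0 };

            if (uffd < 0) {
                    perror("userfaultfd");
                    return -1;
            }
            /* Handshake: negotiate the API version before any other ioctl. */
            if (ioctl(uffd, UFFDIO_API, &api) == -1) {
                    perror("UFFDIO_API");
                    close(uffd);
                    return -1;
            }
            return uffd;
    }

A real monitor would request the features it depends on in api.features before relying on them.
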
157 if (!dst_vma->vm_file) in mfill_file_over_size()
160 inode = dst_vma->vm_file->f_inode; in mfill_file_over_size()
178 struct mm_struct *dst_mm = dst_vma->vm_mm; in mfill_atomic_install_pte()
180 bool writable = dst_vma->vm_flags & VM_WRITE; in mfill_atomic_install_pte()
181 bool vm_shared = dst_vma->vm_flags & VM_SHARED; in mfill_atomic_install_pte()
186 _dst_pte = mk_pte(page, dst_vma->vm_page_prot); in mfill_atomic_install_pte()
195 ret = -EAGAIN; in mfill_atomic_install_pte()
201 ret = -EFAULT; in mfill_atomic_install_pte()
205 ret = -EEXIST; in mfill_atomic_install_pte()
208 * registered, we firstly wr-protect a none pte which has no page cache in mfill_atomic_install_pte()
232 /* No need to invalidate - it was non-present before */ in mfill_atomic_install_pte()
253 ret = -ENOMEM; in mfill_atomic_pte_copy()
273 * and retry the copy outside the mmap_lock. in mfill_atomic_pte_copy()
283 ret = -ENOENT; in mfill_atomic_pte_copy()
302 ret = -ENOMEM; in mfill_atomic_pte_copy()
303 if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL)) in mfill_atomic_pte_copy()
307 &folio->page, true, flags); in mfill_atomic_pte_copy()
322 int ret = -ENOMEM; in mfill_atomic_pte_zeroed_folio()
328 if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL)) in mfill_atomic_pte_zeroed_folio()
339 &folio->page, true, 0); in mfill_atomic_pte_zeroed_folio()
357 if (mm_forbids_zeropage(dst_vma->vm_mm)) in mfill_atomic_pte_zeropage()
361 dst_vma->vm_page_prot)); in mfill_atomic_pte_zeropage()
362 ret = -EAGAIN; in mfill_atomic_pte_zeropage()
363 dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl); in mfill_atomic_pte_zeropage()
367 ret = -EFAULT; in mfill_atomic_pte_zeropage()
370 ret = -EEXIST; in mfill_atomic_pte_zeropage()
373 set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte); in mfill_atomic_pte_zeropage()
374 /* No need to invalidate - it was non-present before */ in mfill_atomic_pte_zeropage()
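
mfill_atomic_pte_zeropage() above is the kernel side of UFFDIO_ZEROPAGE; on configurations where the zero page cannot be used (mm_forbids_zeropage()), the zeroed-folio fallback above it takes over instead. A minimal userspace sketch follows; uffd_zeropage() and the range arguments are assumptions for illustration.

    /* Sketch: back a missing range with the zero page (UFFDIO_ZEROPAGE). */
    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    static int uffd_zeropage(int uffd, unsigned long addr, unsigned long len)
    {
            struct uffdio_zeropage zp = {
                    .range = { .start = addr, .len = len },
                    .mode  = 0,
            };

            if (ioctl(uffd, UFFDIO_ZEROPAGE, &zp) == -1) {
                    perror("UFFDIO_ZEROPAGE");
                    return -1;
            }
            /* zp.zeropage holds the number of bytes zero-mapped, or -errno. */
            return 0;
    }
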
389 struct inode *inode = file_inode(dst_vma->vm_file); in mfill_atomic_pte_continue()
396 /* Our caller expects us to return -EFAULT if we failed to find folio */ in mfill_atomic_pte_continue()
397 if (ret == -ENOENT) in mfill_atomic_pte_continue()
398 ret = -EFAULT; in mfill_atomic_pte_continue()
402 ret = -EFAULT; in mfill_atomic_pte_continue()
408 ret = -EIO; in mfill_atomic_pte_continue()
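
mfill_atomic_pte_continue() resolves minor faults by mapping pages that already sit in the page cache, returning -EFAULT when no folio is found and -EIO when it is not up to date. The userspace counterpart is UFFDIO_CONTINUE, issued after the contents have been written through a second, non-registered mapping of the same file. A sketch, with the helper name and range as assumptions:

    /* Sketch: resolve a minor fault with UFFDIO_CONTINUE.
     * The range must have been registered with UFFDIO_REGISTER_MODE_MINOR. */
    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    static int uffd_continue(int uffd, unsigned long addr, unsigned long len)
    {
            struct uffdio_continue cont = {
                    .range = { .start = addr, .len = len },
                    .mode  = 0,
            };

            /* Populate the page cache via the non-registered alias first,
             * then ask the kernel to map those pages at the faulting range. */
            if (ioctl(uffd, UFFDIO_CONTINUE, &cont) == -1) {
                    perror("UFFDIO_CONTINUE");
                    return -1;
            }
            /* cont.mapped reports how many bytes were mapped, or -errno. */
            return 0;
    }
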
427 /* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
434 struct mm_struct *dst_mm = dst_vma->vm_mm; in mfill_atomic_pte_poison()
439 ret = -EAGAIN; in mfill_atomic_pte_poison()
445 ret = -EFAULT; in mfill_atomic_pte_poison()
449 ret = -EEXIST; in mfill_atomic_pte_poison()
456 /* No need to invalidate - it was non-present before */ in mfill_atomic_pte_poison()
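
mfill_atomic_pte_poison() backs UFFDIO_POISON, which installs markers so that later accesses to the range raise SIGBUS (used, for example, when migrating memory whose source pages were hardware-poisoned). A hedged userspace sketch, with uffd_poison() as an assumed helper name:

    /* Sketch: mark a range as poisoned so future accesses get SIGBUS. */
    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    static int uffd_poison(int uffd, unsigned long addr, unsigned long len)
    {
            struct uffdio_poison poison = {
                    .range = { .start = addr, .len = len },
                    .mode  = 0,
            };

            if (ioctl(uffd, UFFDIO_POISON, &poison) == -1) {
                    perror("UFFDIO_POISON");
                    return -1;
            }
            /* poison.updated reports how many bytes were marked, or -errno. */
            return 0;
    }
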
489 * called with either vma-lock or mmap_lock held, it will release the lock
500 struct mm_struct *dst_mm = dst_vma->vm_mm; in mfill_atomic_hugetlb()
518 up_read(&ctx->map_changing_lock); in mfill_atomic_hugetlb()
520 return -EINVAL; in mfill_atomic_hugetlb()
532 err = -EINVAL; in mfill_atomic_hugetlb()
533 if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1)) in mfill_atomic_hugetlb()
536 retry: in mfill_atomic_hugetlb()
539 * retry, dst_vma will be set to NULL and we must lookup again. in mfill_atomic_hugetlb()
548 err = -ENOENT; in mfill_atomic_hugetlb()
552 err = -EINVAL; in mfill_atomic_hugetlb()
557 * If memory mappings are changing because of non-cooperative in mfill_atomic_hugetlb()
559 * request the user to retry later in mfill_atomic_hugetlb()
561 down_read(&ctx->map_changing_lock); in mfill_atomic_hugetlb()
562 err = -EAGAIN; in mfill_atomic_hugetlb()
563 if (atomic_read(&ctx->mmap_changing)) in mfill_atomic_hugetlb()
577 mapping = dst_vma->vm_file->f_mapping; in mfill_atomic_hugetlb()
582 err = -ENOMEM; in mfill_atomic_hugetlb()
592 err = -EEXIST; in mfill_atomic_hugetlb()
606 if (unlikely(err == -ENOENT)) { in mfill_atomic_hugetlb()
607 up_read(&ctx->map_changing_lock); in mfill_atomic_hugetlb()
614 err = -EFAULT; in mfill_atomic_hugetlb()
619 goto retry; in mfill_atomic_hugetlb()
629 err = -EINTR; in mfill_atomic_hugetlb()
636 up_read(&ctx->map_changing_lock); in mfill_atomic_hugetlb()
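
The hugetlb path above returns -EINVAL when dst_start or len is not a multiple of the huge page size, so a caller has to align its UFFDIO_COPY ranges to the destination VMA's huge page size. A minimal check is sketched below; the 2 MiB HPAGE_SZ constant is an assumption, and the real size should be queried for the mapping in use.

    /* Sketch: alignment check before UFFDIO_COPY into a hugetlb VMA.
     * HPAGE_SZ is an assumption (2 MiB); query the real size for the mapping. */
    #include <stdbool.h>
    #include <stdio.h>

    #define HPAGE_SZ (2UL * 1024 * 1024)

    static bool hugetlb_range_ok(unsigned long dst, unsigned long len)
    {
            if ((dst & (HPAGE_SZ - 1)) || (len & (HPAGE_SZ - 1))) {
                    fprintf(stderr, "range %#lx+%#lx not %lu-byte aligned\n",
                            dst, len, HPAGE_SZ);
                    return false;   /* the kernel would return -EINVAL */
            }
            return true;
    }
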
684 if (!(dst_vma->vm_flags & VM_SHARED)) { in mfill_atomic_pte()
707 struct mm_struct *dst_mm = ctx->mm; in mfill_atomic()
721 /* Does the address range wrap, or is the span zero-sized? */ in mfill_atomic()
729 retry: in mfill_atomic()
741 * If memory mappings are changing because of non-cooperative in mfill_atomic()
743 * request the user to retry later in mfill_atomic()
745 down_read(&ctx->map_changing_lock); in mfill_atomic()
746 err = -EAGAIN; in mfill_atomic()
747 if (atomic_read(&ctx->mmap_changing)) in mfill_atomic()
750 err = -EINVAL; in mfill_atomic()
756 dst_vma->vm_flags & VM_SHARED)) in mfill_atomic()
763 if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP)) in mfill_atomic()
786 err = -ENOMEM; in mfill_atomic()
793 err = -ENOMEM; in mfill_atomic()
804 err = -EEXIST; in mfill_atomic()
808 err = -EFAULT; in mfill_atomic()
813 * tables under us; pte_offset_map_lock() will deal with that. in mfill_atomic()
820 if (unlikely(err == -ENOENT)) { in mfill_atomic()
823 up_read(&ctx->map_changing_lock); in mfill_atomic()
833 err = -EFAULT; in mfill_atomic()
837 goto retry; in mfill_atomic()
847 err = -EINTR; in mfill_atomic()
854 up_read(&ctx->map_changing_lock); in mfill_atomic()
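
The mfill_atomic() fragments above show the two user-visible outcomes of a fill: -EAGAIN while ctx->mmap_changing is set (non-cooperative events in flight) and partial progress when the copy has to be retried outside the mmap_lock. The documented userspace pattern is to re-issue UFFDIO_COPY, advancing past whatever was already filled. A sketch, assuming a helper named uffd_copy() and leaving retry limits to the caller:

    /* Sketch: drive UFFDIO_COPY and retry on EAGAIN / partial copies. */
    #include <errno.h>
    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    static int uffd_copy(int uffd, unsigned long dst, unsigned long src,
                         unsigned long len)
    {
            while (len) {
                    struct uffdio_copy copy = {
                            .dst = dst, .src = src, .len = len, .mode = 0,
                    };

                    if (ioctl(uffd, UFFDIO_COPY, &copy) == 0)
                            return 0;               /* everything copied */
                    if (copy.copy > 0) {
                            /* Partial progress: advance past what was filled. */
                            dst += copy.copy;
                            src += copy.copy;
                            len -= copy.copy;
                            continue;
                    }
                    if (errno == EAGAIN)
                            continue;               /* mappings were changing; retry */
                    perror("UFFDIO_COPY");
                    return -1;
            }
            return 0;
    }
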
887 * smp_wmb() to ensure that any writes to the about-to-be-mapped page by in mfill_atomic_continue()
911 VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end, in uffd_wp_range()
919 * vma->vm_page_prot already reflects that uffd-wp is enabled for this in uffd_wp_range()
921 * to be write-protected as default whenever protection changes. in uffd_wp_range()
926 tlb_gather_mmu(&tlb, dst_vma->vm_mm); in uffd_wp_range()
936 struct mm_struct *dst_mm = ctx->mm; in mwriteprotect_range()
950 /* Does the address range wrap, or is the span zero-sized? */ in mwriteprotect_range()
956 * If memory mappings are changing because of non-cooperative in mwriteprotect_range()
958 * request the user to retry later in mwriteprotect_range()
960 down_read(&ctx->map_changing_lock); in mwriteprotect_range()
961 err = -EAGAIN; in mwriteprotect_range()
962 if (atomic_read(&ctx->mmap_changing)) in mwriteprotect_range()
965 err = -ENOENT; in mwriteprotect_range()
969 err = -ENOENT; in mwriteprotect_range()
974 err = -EINVAL; in mwriteprotect_range()
975 page_mask = vma_kernel_pagesize(dst_vma) - 1; in mwriteprotect_range()
980 _start = max(dst_vma->vm_start, start); in mwriteprotect_range()
981 _end = min(dst_vma->vm_end, end); in mwriteprotect_range()
983 err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp); in mwriteprotect_range()
991 up_read(&ctx->map_changing_lock); in mwriteprotect_range()
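
mwriteprotect_range() above is what UFFDIO_WRITEPROTECT ends up calling, clamping the request to each VMA it walks. The matching userspace call is sketched below; uffd_wp() is an assumed helper name and the enable flag simply toggles UFFDIO_WRITEPROTECT_MODE_WP.

    /* Sketch: write-protect (or unprotect) a registered range; mirrors
     * mwriteprotect_range() on the kernel side. */
    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    static int uffd_wp(int uffd, unsigned long addr, unsigned long len, int enable)
    {
            struct uffdio_writeprotect wp = {
                    .range = { .start = addr, .len = len },
                    .mode  = enable ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
            };

            if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp) == -1) {
                    perror("UFFDIO_WRITEPROTECT");
                    return -1;
            }
            return 0;
    }
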
1049 err = -EAGAIN; in move_present_pte()
1054 !PageAnonExclusive(&src_folio->page)) { in move_present_pte()
1055 err = -EBUSY; in move_present_pte()
1060 /* Folio got pinned from under us. Put it back and fail the move. */ in move_present_pte()
1063 err = -EBUSY; in move_present_pte()
1068 src_folio->index = linear_page_index(dst_vma, dst_addr); in move_present_pte()
1070 orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot); in move_present_pte()
1093 return -EAGAIN; in move_swap_pte()
1098 * index and mapping to align with the dst_vma, where a swap-in may in move_swap_pte()
1103 src_folio->index = linear_page_index(dst_vma, dst_addr); in move_swap_pte()
1128 return -EAGAIN; in move_zeropage_pte()
1132 dst_vma->vm_page_prot)); in move_zeropage_pte()
1170 retry: in move_pages_pte()
1180 /* Retry if a huge pmd materialized from under us */ in move_pages_pte()
1182 err = -EAGAIN; in move_pages_pte()
1196 * can zap transparent huge pages under us, or the in move_pages_pte()
1198 * transparent huge pages under us. in move_pages_pte()
1201 err = -EAGAIN; in move_pages_pte()
1208 err = -EINVAL; in move_pages_pte()
1216 err = -EEXIST; in move_pages_pte()
1225 err = -ENOENT; in move_pages_pte()
1233 err = -EAGAIN; in move_pages_pte()
1249 * unmap the ptes, obtain the lock and retry. in move_pages_pte()
1257 * page isn't freed under us in move_pages_pte()
1262 err = -EAGAIN; in move_pages_pte()
1267 if (!folio || !PageAnonExclusive(&folio->page)) { in move_pages_pte()
1269 err = -EBUSY; in move_pages_pte()
1283 err = -EAGAIN; in move_pages_pte()
1296 /* now we can block and wait */ in move_pages_pte()
1298 goto retry; in move_pages_pte()
1302 err = -EBUSY; in move_pages_pte()
1320 goto retry; in move_pages_pte()
1331 /* page was unmapped from under us */ in move_pages_pte()
1332 err = -EAGAIN; in move_pages_pte()
1339 /* now we can block and wait */ in move_pages_pte()
1341 goto retry; in move_pages_pte()
1359 err = -EAGAIN; in move_pages_pte()
1361 err = -EFAULT; in move_pages_pte()
1366 err = -EBUSY; in move_pages_pte()
1372 err = -EAGAIN; in move_pages_pte()
1382 * For large folios, return -EBUSY immediately, as split_folio() in move_pages_pte()
1383 * also returns -EBUSY when attempting to split unmapped large in move_pages_pte()
1392 err = -EBUSY; in move_pages_pte()
1404 /* now we can block and wait */ in move_pages_pte()
1406 goto retry; in move_pages_pte()
1440 src_end - src_addr < HPAGE_PMD_SIZE; in move_splits_huge_pmd()
1454 return !(vma->vm_flags & (VM_PFNMAP | VM_IO | VM_HUGETLB | in vma_move_compatible()
1463 if ((src_vma->vm_flags & VM_ACCESS_FLAGS) != (dst_vma->vm_flags & VM_ACCESS_FLAGS) || in validate_move_areas()
1464 pgprot_val(src_vma->vm_page_prot) != pgprot_val(dst_vma->vm_page_prot)) in validate_move_areas()
1465 return -EINVAL; in validate_move_areas()
1468 if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED)) in validate_move_areas()
1469 return -EINVAL; in validate_move_areas()
1475 if (!(src_vma->vm_flags & VM_WRITE)) in validate_move_areas()
1476 return -EINVAL; in validate_move_areas()
1480 return -EINVAL; in validate_move_areas()
1483 if (!dst_vma->vm_userfaultfd_ctx.ctx || in validate_move_areas()
1484 dst_vma->vm_userfaultfd_ctx.ctx != ctx) in validate_move_areas()
1485 return -EINVAL; in validate_move_areas()
1489 return -EINVAL; in validate_move_areas()
1510 if (src_start >= vma->vm_start && src_start < vma->vm_end) in find_vmas_mm_locked()
1515 return -ENOENT; in find_vmas_mm_locked()
1540 if (src_start >= vma->vm_start && src_start < vma->vm_end) { in uffd_move_lock()
1549 * ------- ------- in uffd_move_lock()
1561 /* Undo any locking and retry in mmap_lock critical section */ in uffd_move_lock()
1571 down_read(&(*dst_vmap)->vm_lock->lock); in uffd_move_lock()
1573 down_read_nested(&(*src_vmap)->vm_lock->lock, in uffd_move_lock()
1608 mmap_assert_locked(src_vma->vm_mm); in uffd_move_unlock()
1609 mmap_read_unlock(dst_vma->vm_mm); in uffd_move_unlock()
1614 * move_pages - move arbitrary anonymous pages of an existing vma
1621 * It will either use the mmap_lock in read mode or per-vma locks
1635 * avoids move_pages() to fail with -EBUSY if the process forks before
1652 * move_pages() will fail respectively with -ENOENT or -EEXIST. This
1668 * userland with src+retval, dst+retval, len-retval if it wants to know in move_pages()
1672 * prevent -ENOENT errors to materialize if there are holes in the
1682 * folio_referenced), they will have to verify if the folio->mapping
1684 * should release the lock and retry obtaining a new anon_vma, because
1692 struct mm_struct *mm = ctx->mm; in move_pages()
1696 long err = -EINVAL; in move_pages()
1705 /* Does the address range wrap, or is the span zero-sized? */ in move_pages()
1714 /* Re-check after taking map_changing_lock */ in move_pages()
1715 err = -EAGAIN; in move_pages()
1716 down_read(&ctx->map_changing_lock); in move_pages()
1717 if (likely(atomic_read(&ctx->mmap_changing))) in move_pages()
1724 err = -EINVAL; in move_pages()
1725 if (src_vma->vm_flags & VM_SHARED) in move_pages()
1727 if (src_start + len > src_vma->vm_end) in move_pages()
1730 if (dst_vma->vm_flags & VM_SHARED) in move_pages()
1732 if (dst_start + len > dst_vma->vm_end) in move_pages()
1747 * transparent huge PUD. If file-backed support is added, in move_pages()
1753 err = -ENOENT; in move_pages()
1758 err = -ENOMEM; in move_pages()
1764 err = -ENOMEM; in move_pages()
1772 * move_pages_huge_pmd() will detect the change and retry in move_pages()
1776 err = -EEXIST; in move_pages()
1784 err = -ENOENT; in move_pages()
1794 !PageAnonExclusive(&folio->page))) { in move_pages()
1796 err = -EBUSY; in move_pages()
1813 err = -ENOENT; in move_pages()
1817 err = -ENOMEM; in move_pages()
1823 err = -ENOMEM; in move_pages()
1837 if (!err || err == -EAGAIN) in move_pages()
1838 err = -EINTR; in move_pages()
1843 if (err == -EAGAIN) in move_pages()
1855 up_read(&ctx->map_changing_lock); in move_pages()
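
The move_pages() documentation fragments above spell out the retry contract: on partial success userland retries with src+retval, dst+retval, len-retval, and -EAGAIN means the layout changed underneath. A userspace sketch of that loop follows; it assumes a kernel and headers that provide UFFDIO_MOVE, and uffd_move() is an illustrative helper name.

    /* Sketch: move anonymous pages between two ranges with UFFDIO_MOVE,
     * advancing past partial progress as the kernel comment above describes. */
    #include <errno.h>
    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    static int uffd_move(int uffd, unsigned long dst, unsigned long src,
                         unsigned long len)
    {
            while (len) {
                    struct uffdio_move mv = {
                            .dst = dst, .src = src, .len = len,
                            .mode = UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES,
                    };

                    if (ioctl(uffd, UFFDIO_MOVE, &mv) == 0)
                            return 0;
                    if (mv.move > 0) {
                            /* Partial move: continue with src+retval,
                             * dst+retval, len-retval. */
                            dst += mv.move;
                            src += mv.move;
                            len -= mv.move;
                            continue;
                    }
                    if (errno == EAGAIN)
                            continue;       /* layout changed under us; retry */
                    perror("UFFDIO_MOVE");
                    return -1;
            }
            return 0;
    }
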
1867 const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP; in userfaultfd_set_vm_flags()
1872 * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply in userfaultfd_set_vm_flags()
1873 * recalculate vma->vm_page_prot whenever userfaultfd-wp changes. in userfaultfd_set_vm_flags()
1875 if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed) in userfaultfd_set_vm_flags()
1884 vma->vm_userfaultfd_ctx = (struct vm_userfaultfd_ctx){ctx}; in userfaultfd_set_ctx()
1886 (vma->vm_flags & ~__VM_UFFD_FLAGS) | flags); in userfaultfd_set_ctx()
1902 /* Reset ptes for the whole vma range if wr-protected */ in userfaultfd_clear_vma()
1904 uffd_wp_range(vma, start, end - start, false); in userfaultfd_clear_vma()
1907 vma->vm_flags & ~__VM_UFFD_FLAGS, in userfaultfd_clear_vma()
1911 * In the vma_merge() successful mprotect-like case 8: in userfaultfd_clear_vma()
1928 VMA_ITERATOR(vmi, ctx->mm, start); in userfaultfd_register_range()
1933 if (vma->vm_start < start) in userfaultfd_register_range()
1940 BUG_ON(vma->vm_userfaultfd_ctx.ctx && in userfaultfd_register_range()
1941 vma->vm_userfaultfd_ctx.ctx != ctx); in userfaultfd_register_range()
1942 WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); in userfaultfd_register_range()
1948 if (vma->vm_userfaultfd_ctx.ctx == ctx && in userfaultfd_register_range()
1949 (vma->vm_flags & vm_flags) == vm_flags) in userfaultfd_register_range()
1952 if (vma->vm_start > start) in userfaultfd_register_range()
1953 start = vma->vm_start; in userfaultfd_register_range()
1954 vma_end = min(end, vma->vm_end); in userfaultfd_register_range()
1956 new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags; in userfaultfd_register_range()
1964 * In the vma_merge() successful mprotect-like case 8: in userfaultfd_register_range()
1975 start = vma->vm_end; in userfaultfd_register_range()
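
userfaultfd_register_range() above is the kernel side of UFFDIO_REGISTER, splitting or merging VMAs so the requested flags cover exactly the given range. The userspace side is sketched below; registering for both MISSING and WP faults is an assumption, pick only the modes the monitor actually needs.

    /* Sketch: register a region for MISSING + WP faults (assumed modes). */
    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    static int uffd_register(int uffd, unsigned long addr, unsigned long len)
    {
            struct uffdio_register reg = {
                    .range = { .start = addr, .len = len },
                    .mode  = UFFDIO_REGISTER_MODE_MISSING | UFFDIO_REGISTER_MODE_WP,
            };

            if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1) {
                    perror("UFFDIO_REGISTER");
                    return -1;
            }
            /* reg.ioctls reports which UFFDIO_* operations the range supports. */
            return 0;
    }
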
1983 struct mm_struct *mm = ctx->mm; in userfaultfd_release_new()
1987 /* the various vma->vm_userfaultfd_ctx still points to it */ in userfaultfd_release_new()
1990 if (vma->vm_userfaultfd_ctx.ctx == ctx) in userfaultfd_release_new()
2008 * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx in userfaultfd_release_all()
2017 BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^ in userfaultfd_release_all()
2018 !!(vma->vm_flags & __VM_UFFD_FLAGS)); in userfaultfd_release_all()
2019 if (vma->vm_userfaultfd_ctx.ctx != ctx) { in userfaultfd_release_all()
2025 vma->vm_start, vma->vm_end); in userfaultfd_release_all()