1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * fs/userfaultfd.c 4 * 5 * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> 6 * Copyright (C) 2008-2009 Red Hat, Inc. 7 * Copyright (C) 2015 Red Hat, Inc. 8 * 9 * Some part derived from fs/eventfd.c (anon inode setup) and 10 * mm/ksm.c (mm hashing). 11 */ 12 13 #include <linux/list.h> 14 #include <linux/hashtable.h> 15 #include <linux/sched/signal.h> 16 #include <linux/sched/mm.h> 17 #include <linux/mm.h> 18 #include <linux/mm_inline.h> 19 #include <linux/mmu_notifier.h> 20 #include <linux/poll.h> 21 #include <linux/slab.h> 22 #include <linux/seq_file.h> 23 #include <linux/file.h> 24 #include <linux/bug.h> 25 #include <linux/anon_inodes.h> 26 #include <linux/syscalls.h> 27 #include <linux/userfaultfd_k.h> 28 #include <linux/mempolicy.h> 29 #include <linux/ioctl.h> 30 #include <linux/security.h> 31 #include <linux/hugetlb.h> 32 #include <linux/swapops.h> 33 #include <linux/miscdevice.h> 34 35 static int sysctl_unprivileged_userfaultfd __read_mostly; 36 37 #ifdef CONFIG_SYSCTL 38 static struct ctl_table vm_userfaultfd_table[] = { 39 { 40 .procname = "unprivileged_userfaultfd", 41 .data = &sysctl_unprivileged_userfaultfd, 42 .maxlen = sizeof(sysctl_unprivileged_userfaultfd), 43 .mode = 0644, 44 .proc_handler = proc_dointvec_minmax, 45 .extra1 = SYSCTL_ZERO, 46 .extra2 = SYSCTL_ONE, 47 }, 48 }; 49 #endif 50 51 static struct kmem_cache *userfaultfd_ctx_cachep __ro_after_init; 52 53 struct userfaultfd_fork_ctx { 54 struct userfaultfd_ctx *orig; 55 struct userfaultfd_ctx *new; 56 struct list_head list; 57 }; 58 59 struct userfaultfd_unmap_ctx { 60 struct userfaultfd_ctx *ctx; 61 unsigned long start; 62 unsigned long end; 63 struct list_head list; 64 }; 65 66 struct userfaultfd_wait_queue { 67 struct uffd_msg msg; 68 wait_queue_entry_t wq; 69 struct userfaultfd_ctx *ctx; 70 bool waken; 71 }; 72 73 struct userfaultfd_wake_range { 74 unsigned long start; 75 unsigned long len; 76 }; 77 78 /* internal indication that UFFD_API ioctl was successfully executed */ 79 #define UFFD_FEATURE_INITIALIZED (1u << 31) 80 81 static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx) 82 { 83 return ctx->features & UFFD_FEATURE_INITIALIZED; 84 } 85 86 static bool userfaultfd_wp_async_ctx(struct userfaultfd_ctx *ctx) 87 { 88 return ctx && (ctx->features & UFFD_FEATURE_WP_ASYNC); 89 } 90 91 /* 92 * Whether WP_UNPOPULATED is enabled on the uffd context. It is only 93 * meaningful when userfaultfd_wp()==true on the vma and when it's 94 * anonymous. 95 */ 96 bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma) 97 { 98 struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx; 99 100 if (!ctx) 101 return false; 102 103 return ctx->features & UFFD_FEATURE_WP_UNPOPULATED; 104 } 105 106 static void userfaultfd_set_vm_flags(struct vm_area_struct *vma, 107 vm_flags_t flags) 108 { 109 const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP; 110 111 vm_flags_reset(vma, flags); 112 /* 113 * For shared mappings, we want to enable writenotify while 114 * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply 115 * recalculate vma->vm_page_prot whenever userfaultfd-wp changes. 
	 */
	if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
		vma_set_page_prot(vma);
}

static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
				     int wake_flags, void *key)
{
	struct userfaultfd_wake_range *range = key;
	int ret;
	struct userfaultfd_wait_queue *uwq;
	unsigned long start, len;

	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
	ret = 0;
	/* len == 0 means wake all */
	start = range->start;
	len = range->len;
	if (len && (start > uwq->msg.arg.pagefault.address ||
		    start + len <= uwq->msg.arg.pagefault.address))
		goto out;
	WRITE_ONCE(uwq->waken, true);
	/*
	 * The Program-Order guarantees provided by the scheduler
	 * ensure uwq->waken is visible before the task is woken.
	 */
	ret = wake_up_state(wq->private, mode);
	if (ret) {
		/*
		 * Wake only once, autoremove behavior.
		 *
		 * After the effect of list_del_init is visible to the other
		 * CPUs, the waitqueue may disappear from under us, see the
		 * !list_empty_careful() in handle_userfault().
		 *
		 * try_to_wake_up() has an implicit smp_mb(), and the
		 * wq->private is read before calling the extern function
		 * "wake_up_state" (which in turn calls try_to_wake_up).
		 */
		list_del_init(&wq->entry);
	}
out:
	return ret;
}

/**
 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to the userfaultfd context.
 */
static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
{
	refcount_inc(&ctx->refcount);
}

/**
 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to userfaultfd context.
 *
 * The userfaultfd context reference must have been previously acquired either
 * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
 */
static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
{
	if (refcount_dec_and_test(&ctx->refcount)) {
		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
		mmdrop(ctx->mm);
		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
	}
}

static inline void msg_init(struct uffd_msg *msg)
{
	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
	/*
	 * Must use memset to zero out the padding or kernel data is
	 * leaked to userland.
	 */
	memset(msg, 0, sizeof(struct uffd_msg));
}

static inline struct uffd_msg userfault_msg(unsigned long address,
					    unsigned long real_address,
					    unsigned int flags,
					    unsigned long reason,
					    unsigned int features)
{
	struct uffd_msg msg;

	msg_init(&msg);
	msg.event = UFFD_EVENT_PAGEFAULT;

	msg.arg.pagefault.address = (features & UFFD_FEATURE_EXACT_ADDRESS) ?
				    real_address : address;

	/*
	 * These flags indicate why the userfault occurred:
	 * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
	 * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault.
	 * - Neither of these flags being set indicates a MISSING fault.
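	 *
	 * (Editor's illustrative sketch, not kernel code: a userspace
	 * monitor typically decodes these bits after read()ing the
	 * uffd_msg, e.g.
	 *
	 *	if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_MINOR)
	 *		handle_minor(msg.arg.pagefault.address);
	 *	else if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WP)
	 *		handle_wp(msg.arg.pagefault.address);
	 *	else
	 *		handle_missing(msg.arg.pagefault.address);
	 *
	 * where handle_minor(), handle_wp() and handle_missing() are
	 * hypothetical monitor callbacks.)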
	 *
	 * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write
	 * fault. Otherwise, it was a read fault.
	 */
	if (flags & FAULT_FLAG_WRITE)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
	if (reason & VM_UFFD_WP)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
	if (reason & VM_UFFD_MINOR)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR;
	if (features & UFFD_FEATURE_THREAD_ID)
		msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
	return msg;
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * Same functionality as userfaultfd_must_wait below with modifications for
 * hugepmd ranges.
 */
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      struct vm_fault *vmf,
					      unsigned long reason)
{
	struct vm_area_struct *vma = vmf->vma;
	pte_t *ptep, pte;
	bool ret = true;

	assert_fault_locked(vmf);

	ptep = hugetlb_walk(vma, vmf->address, vma_mmu_pagesize(vma));
	if (!ptep)
		goto out;

	ret = false;
	pte = huge_ptep_get(ptep);

	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us. PTE markers should be handled the same as none
	 * ptes here.
	 */
	if (huge_pte_none_mostly(pte))
		ret = true;
	if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
		ret = true;
out:
	return ret;
}
#else
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      struct vm_fault *vmf,
					      unsigned long reason)
{
	return false;	/* should never get here */
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Verify the pagetables are still not ok after having registered into
 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
 * userfault that has already been resolved, if userfaultfd_read and
 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
 * threads.
 */
static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
					 struct vm_fault *vmf,
					 unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	unsigned long address = vmf->address;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pte_t ptent;
	bool ret = true;

	assert_fault_locked(vmf);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;
	pmd = pmd_offset(pud, address);
again:
	_pmd = pmdp_get_lockless(pmd);
	if (pmd_none(_pmd))
		goto out;

	ret = false;
	if (!pmd_present(_pmd) || pmd_devmap(_pmd))
		goto out;

	if (pmd_trans_huge(_pmd)) {
		if (!pmd_write(_pmd) && (reason & VM_UFFD_WP))
			ret = true;
		goto out;
	}

	pte = pte_offset_map(pmd, address);
	if (!pte) {
		ret = true;
		goto again;
	}
	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us. PTE markers should be handled the same as none
	 * ptes here.
339 */ 340 ptent = ptep_get(pte); 341 if (pte_none_mostly(ptent)) 342 ret = true; 343 if (!pte_write(ptent) && (reason & VM_UFFD_WP)) 344 ret = true; 345 pte_unmap(pte); 346 347 out: 348 return ret; 349 } 350 351 static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags) 352 { 353 if (flags & FAULT_FLAG_INTERRUPTIBLE) 354 return TASK_INTERRUPTIBLE; 355 356 if (flags & FAULT_FLAG_KILLABLE) 357 return TASK_KILLABLE; 358 359 return TASK_UNINTERRUPTIBLE; 360 } 361 362 /* 363 * The locking rules involved in returning VM_FAULT_RETRY depending on 364 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and 365 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution" 366 * recommendation in __lock_page_or_retry is not an understatement. 367 * 368 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released 369 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is 370 * not set. 371 * 372 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not 373 * set, VM_FAULT_RETRY can still be returned if and only if there are 374 * fatal_signal_pending()s, and the mmap_lock must be released before 375 * returning it. 376 */ 377 vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) 378 { 379 struct vm_area_struct *vma = vmf->vma; 380 struct mm_struct *mm = vma->vm_mm; 381 struct userfaultfd_ctx *ctx; 382 struct userfaultfd_wait_queue uwq; 383 vm_fault_t ret = VM_FAULT_SIGBUS; 384 bool must_wait; 385 unsigned int blocking_state; 386 387 /* 388 * We don't do userfault handling for the final child pid update. 389 * 390 * We also don't do userfault handling during 391 * coredumping. hugetlbfs has the special 392 * hugetlb_follow_page_mask() to skip missing pages in the 393 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with 394 * the no_page_table() helper in follow_page_mask(), but the 395 * shmem_vm_ops->fault method is invoked even during 396 * coredumping and it ends up here. 397 */ 398 if (current->flags & (PF_EXITING|PF_DUMPCORE)) 399 goto out; 400 401 assert_fault_locked(vmf); 402 403 ctx = vma->vm_userfaultfd_ctx.ctx; 404 if (!ctx) 405 goto out; 406 407 BUG_ON(ctx->mm != mm); 408 409 /* Any unrecognized flag is a bug. */ 410 VM_BUG_ON(reason & ~__VM_UFFD_FLAGS); 411 /* 0 or > 1 flags set is a bug; we expect exactly 1. */ 412 VM_BUG_ON(!reason || (reason & (reason - 1))); 413 414 if (ctx->features & UFFD_FEATURE_SIGBUS) 415 goto out; 416 if (!(vmf->flags & FAULT_FLAG_USER) && (ctx->flags & UFFD_USER_MODE_ONLY)) 417 goto out; 418 419 /* 420 * If it's already released don't get it. This avoids to loop 421 * in __get_user_pages if userfaultfd_release waits on the 422 * caller of handle_userfault to release the mmap_lock. 423 */ 424 if (unlikely(READ_ONCE(ctx->released))) { 425 /* 426 * Don't return VM_FAULT_SIGBUS in this case, so a non 427 * cooperative manager can close the uffd after the 428 * last UFFDIO_COPY, without risking to trigger an 429 * involuntary SIGBUS if the process was starting the 430 * userfaultfd while the userfaultfd was still armed 431 * (but after the last UFFDIO_COPY). If the uffd 432 * wasn't already closed when the userfault reached 433 * this point, that would normally be solved by 434 * userfaultfd_must_wait returning 'false'. 435 * 436 * If we were to return VM_FAULT_SIGBUS here, the non 437 * cooperative manager would be instead forced to 438 * always call UFFDIO_UNREGISTER before it can safely 439 * close the uffd. 
440 */ 441 ret = VM_FAULT_NOPAGE; 442 goto out; 443 } 444 445 /* 446 * Check that we can return VM_FAULT_RETRY. 447 * 448 * NOTE: it should become possible to return VM_FAULT_RETRY 449 * even if FAULT_FLAG_TRIED is set without leading to gup() 450 * -EBUSY failures, if the userfaultfd is to be extended for 451 * VM_UFFD_WP tracking and we intend to arm the userfault 452 * without first stopping userland access to the memory. For 453 * VM_UFFD_MISSING userfaults this is enough for now. 454 */ 455 if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) { 456 /* 457 * Validate the invariant that nowait must allow retry 458 * to be sure not to return SIGBUS erroneously on 459 * nowait invocations. 460 */ 461 BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT); 462 #ifdef CONFIG_DEBUG_VM 463 if (printk_ratelimit()) { 464 printk(KERN_WARNING 465 "FAULT_FLAG_ALLOW_RETRY missing %x\n", 466 vmf->flags); 467 dump_stack(); 468 } 469 #endif 470 goto out; 471 } 472 473 /* 474 * Handle nowait, not much to do other than tell it to retry 475 * and wait. 476 */ 477 ret = VM_FAULT_RETRY; 478 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) 479 goto out; 480 481 /* take the reference before dropping the mmap_lock */ 482 userfaultfd_ctx_get(ctx); 483 484 init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function); 485 uwq.wq.private = current; 486 uwq.msg = userfault_msg(vmf->address, vmf->real_address, vmf->flags, 487 reason, ctx->features); 488 uwq.ctx = ctx; 489 uwq.waken = false; 490 491 blocking_state = userfaultfd_get_blocking_state(vmf->flags); 492 493 /* 494 * Take the vma lock now, in order to safely call 495 * userfaultfd_huge_must_wait() later. Since acquiring the 496 * (sleepable) vma lock can modify the current task state, that 497 * must be before explicitly calling set_current_state(). 498 */ 499 if (is_vm_hugetlb_page(vma)) 500 hugetlb_vma_lock_read(vma); 501 502 spin_lock_irq(&ctx->fault_pending_wqh.lock); 503 /* 504 * After the __add_wait_queue the uwq is visible to userland 505 * through poll/read(). 506 */ 507 __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq); 508 /* 509 * The smp_mb() after __set_current_state prevents the reads 510 * following the spin_unlock to happen before the list_add in 511 * __add_wait_queue. 512 */ 513 set_current_state(blocking_state); 514 spin_unlock_irq(&ctx->fault_pending_wqh.lock); 515 516 if (!is_vm_hugetlb_page(vma)) 517 must_wait = userfaultfd_must_wait(ctx, vmf, reason); 518 else 519 must_wait = userfaultfd_huge_must_wait(ctx, vmf, reason); 520 if (is_vm_hugetlb_page(vma)) 521 hugetlb_vma_unlock_read(vma); 522 release_fault_lock(vmf); 523 524 if (likely(must_wait && !READ_ONCE(ctx->released))) { 525 wake_up_poll(&ctx->fd_wqh, EPOLLIN); 526 schedule(); 527 } 528 529 __set_current_state(TASK_RUNNING); 530 531 /* 532 * Here we race with the list_del; list_add in 533 * userfaultfd_ctx_read(), however because we don't ever run 534 * list_del_init() to refile across the two lists, the prev 535 * and next pointers will never point to self. list_add also 536 * would never let any of the two pointers to point to 537 * self. So list_empty_careful won't risk to see both pointers 538 * pointing to self at any time during the list refile. The 539 * only case where list_del_init() is called is the full 540 * removal in the wake function and there we don't re-list_add 541 * and it's fine not to block on the spinlock. The uwq on this 542 * kernel stack can be released after the list_del_init. 
543 */ 544 if (!list_empty_careful(&uwq.wq.entry)) { 545 spin_lock_irq(&ctx->fault_pending_wqh.lock); 546 /* 547 * No need of list_del_init(), the uwq on the stack 548 * will be freed shortly anyway. 549 */ 550 list_del(&uwq.wq.entry); 551 spin_unlock_irq(&ctx->fault_pending_wqh.lock); 552 } 553 554 /* 555 * ctx may go away after this if the userfault pseudo fd is 556 * already released. 557 */ 558 userfaultfd_ctx_put(ctx); 559 560 out: 561 return ret; 562 } 563 564 static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, 565 struct userfaultfd_wait_queue *ewq) 566 { 567 struct userfaultfd_ctx *release_new_ctx; 568 569 if (WARN_ON_ONCE(current->flags & PF_EXITING)) 570 goto out; 571 572 ewq->ctx = ctx; 573 init_waitqueue_entry(&ewq->wq, current); 574 release_new_ctx = NULL; 575 576 spin_lock_irq(&ctx->event_wqh.lock); 577 /* 578 * After the __add_wait_queue the uwq is visible to userland 579 * through poll/read(). 580 */ 581 __add_wait_queue(&ctx->event_wqh, &ewq->wq); 582 for (;;) { 583 set_current_state(TASK_KILLABLE); 584 if (ewq->msg.event == 0) 585 break; 586 if (READ_ONCE(ctx->released) || 587 fatal_signal_pending(current)) { 588 /* 589 * &ewq->wq may be queued in fork_event, but 590 * __remove_wait_queue ignores the head 591 * parameter. It would be a problem if it 592 * didn't. 593 */ 594 __remove_wait_queue(&ctx->event_wqh, &ewq->wq); 595 if (ewq->msg.event == UFFD_EVENT_FORK) { 596 struct userfaultfd_ctx *new; 597 598 new = (struct userfaultfd_ctx *) 599 (unsigned long) 600 ewq->msg.arg.reserved.reserved1; 601 release_new_ctx = new; 602 } 603 break; 604 } 605 606 spin_unlock_irq(&ctx->event_wqh.lock); 607 608 wake_up_poll(&ctx->fd_wqh, EPOLLIN); 609 schedule(); 610 611 spin_lock_irq(&ctx->event_wqh.lock); 612 } 613 __set_current_state(TASK_RUNNING); 614 spin_unlock_irq(&ctx->event_wqh.lock); 615 616 if (release_new_ctx) { 617 struct vm_area_struct *vma; 618 struct mm_struct *mm = release_new_ctx->mm; 619 VMA_ITERATOR(vmi, mm, 0); 620 621 /* the various vma->vm_userfaultfd_ctx still points to it */ 622 mmap_write_lock(mm); 623 for_each_vma(vmi, vma) { 624 if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) { 625 vma_start_write(vma); 626 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 627 userfaultfd_set_vm_flags(vma, 628 vma->vm_flags & ~__VM_UFFD_FLAGS); 629 } 630 } 631 mmap_write_unlock(mm); 632 633 userfaultfd_ctx_put(release_new_ctx); 634 } 635 636 /* 637 * ctx may go away after this if the userfault pseudo fd is 638 * already released. 
639 */ 640 out: 641 atomic_dec(&ctx->mmap_changing); 642 VM_BUG_ON(atomic_read(&ctx->mmap_changing) < 0); 643 userfaultfd_ctx_put(ctx); 644 } 645 646 static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx, 647 struct userfaultfd_wait_queue *ewq) 648 { 649 ewq->msg.event = 0; 650 wake_up_locked(&ctx->event_wqh); 651 __remove_wait_queue(&ctx->event_wqh, &ewq->wq); 652 } 653 654 int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs) 655 { 656 struct userfaultfd_ctx *ctx = NULL, *octx; 657 struct userfaultfd_fork_ctx *fctx; 658 659 octx = vma->vm_userfaultfd_ctx.ctx; 660 if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) { 661 vma_start_write(vma); 662 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 663 userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS); 664 return 0; 665 } 666 667 list_for_each_entry(fctx, fcs, list) 668 if (fctx->orig == octx) { 669 ctx = fctx->new; 670 break; 671 } 672 673 if (!ctx) { 674 fctx = kmalloc(sizeof(*fctx), GFP_KERNEL); 675 if (!fctx) 676 return -ENOMEM; 677 678 ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL); 679 if (!ctx) { 680 kfree(fctx); 681 return -ENOMEM; 682 } 683 684 refcount_set(&ctx->refcount, 1); 685 ctx->flags = octx->flags; 686 ctx->features = octx->features; 687 ctx->released = false; 688 init_rwsem(&ctx->map_changing_lock); 689 atomic_set(&ctx->mmap_changing, 0); 690 ctx->mm = vma->vm_mm; 691 mmgrab(ctx->mm); 692 693 userfaultfd_ctx_get(octx); 694 down_write(&octx->map_changing_lock); 695 atomic_inc(&octx->mmap_changing); 696 up_write(&octx->map_changing_lock); 697 fctx->orig = octx; 698 fctx->new = ctx; 699 list_add_tail(&fctx->list, fcs); 700 } 701 702 vma->vm_userfaultfd_ctx.ctx = ctx; 703 return 0; 704 } 705 706 static void dup_fctx(struct userfaultfd_fork_ctx *fctx) 707 { 708 struct userfaultfd_ctx *ctx = fctx->orig; 709 struct userfaultfd_wait_queue ewq; 710 711 msg_init(&ewq.msg); 712 713 ewq.msg.event = UFFD_EVENT_FORK; 714 ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new; 715 716 userfaultfd_event_wait_completion(ctx, &ewq); 717 } 718 719 void dup_userfaultfd_complete(struct list_head *fcs) 720 { 721 struct userfaultfd_fork_ctx *fctx, *n; 722 723 list_for_each_entry_safe(fctx, n, fcs, list) { 724 dup_fctx(fctx); 725 list_del(&fctx->list); 726 kfree(fctx); 727 } 728 } 729 730 void mremap_userfaultfd_prep(struct vm_area_struct *vma, 731 struct vm_userfaultfd_ctx *vm_ctx) 732 { 733 struct userfaultfd_ctx *ctx; 734 735 ctx = vma->vm_userfaultfd_ctx.ctx; 736 737 if (!ctx) 738 return; 739 740 if (ctx->features & UFFD_FEATURE_EVENT_REMAP) { 741 vm_ctx->ctx = ctx; 742 userfaultfd_ctx_get(ctx); 743 down_write(&ctx->map_changing_lock); 744 atomic_inc(&ctx->mmap_changing); 745 up_write(&ctx->map_changing_lock); 746 } else { 747 /* Drop uffd context if remap feature not enabled */ 748 vma_start_write(vma); 749 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 750 userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS); 751 } 752 } 753 754 void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx, 755 unsigned long from, unsigned long to, 756 unsigned long len) 757 { 758 struct userfaultfd_ctx *ctx = vm_ctx->ctx; 759 struct userfaultfd_wait_queue ewq; 760 761 if (!ctx) 762 return; 763 764 if (to & ~PAGE_MASK) { 765 userfaultfd_ctx_put(ctx); 766 return; 767 } 768 769 msg_init(&ewq.msg); 770 771 ewq.msg.event = UFFD_EVENT_REMAP; 772 ewq.msg.arg.remap.from = from; 773 ewq.msg.arg.remap.to = to; 774 ewq.msg.arg.remap.len = len; 775 776 userfaultfd_event_wait_completion(ctx, &ewq); 
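
	/*
	 * Editor's note, an illustrative sketch rather than kernel code: a
	 * monitor that negotiated UFFD_FEATURE_EVENT_REMAP receives this
	 * event from its read() loop and updates its own bookkeeping of the
	 * registered ranges, e.g.
	 *
	 *	if (msg.event == UFFD_EVENT_REMAP)
	 *		track_move(msg.arg.remap.from, msg.arg.remap.to,
	 *			   msg.arg.remap.len);
	 *
	 * with track_move() being a hypothetical helper on the monitor side.
	 * The remapping thread stays blocked in
	 * userfaultfd_event_wait_completion() above until the event has been
	 * read and completed via userfaultfd_event_complete().
	 */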
777 } 778 779 bool userfaultfd_remove(struct vm_area_struct *vma, 780 unsigned long start, unsigned long end) 781 { 782 struct mm_struct *mm = vma->vm_mm; 783 struct userfaultfd_ctx *ctx; 784 struct userfaultfd_wait_queue ewq; 785 786 ctx = vma->vm_userfaultfd_ctx.ctx; 787 if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE)) 788 return true; 789 790 userfaultfd_ctx_get(ctx); 791 down_write(&ctx->map_changing_lock); 792 atomic_inc(&ctx->mmap_changing); 793 up_write(&ctx->map_changing_lock); 794 mmap_read_unlock(mm); 795 796 msg_init(&ewq.msg); 797 798 ewq.msg.event = UFFD_EVENT_REMOVE; 799 ewq.msg.arg.remove.start = start; 800 ewq.msg.arg.remove.end = end; 801 802 userfaultfd_event_wait_completion(ctx, &ewq); 803 804 return false; 805 } 806 807 static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps, 808 unsigned long start, unsigned long end) 809 { 810 struct userfaultfd_unmap_ctx *unmap_ctx; 811 812 list_for_each_entry(unmap_ctx, unmaps, list) 813 if (unmap_ctx->ctx == ctx && unmap_ctx->start == start && 814 unmap_ctx->end == end) 815 return true; 816 817 return false; 818 } 819 820 int userfaultfd_unmap_prep(struct vm_area_struct *vma, unsigned long start, 821 unsigned long end, struct list_head *unmaps) 822 { 823 struct userfaultfd_unmap_ctx *unmap_ctx; 824 struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx; 825 826 if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) || 827 has_unmap_ctx(ctx, unmaps, start, end)) 828 return 0; 829 830 unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL); 831 if (!unmap_ctx) 832 return -ENOMEM; 833 834 userfaultfd_ctx_get(ctx); 835 down_write(&ctx->map_changing_lock); 836 atomic_inc(&ctx->mmap_changing); 837 up_write(&ctx->map_changing_lock); 838 unmap_ctx->ctx = ctx; 839 unmap_ctx->start = start; 840 unmap_ctx->end = end; 841 list_add_tail(&unmap_ctx->list, unmaps); 842 843 return 0; 844 } 845 846 void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf) 847 { 848 struct userfaultfd_unmap_ctx *ctx, *n; 849 struct userfaultfd_wait_queue ewq; 850 851 list_for_each_entry_safe(ctx, n, uf, list) { 852 msg_init(&ewq.msg); 853 854 ewq.msg.event = UFFD_EVENT_UNMAP; 855 ewq.msg.arg.remove.start = ctx->start; 856 ewq.msg.arg.remove.end = ctx->end; 857 858 userfaultfd_event_wait_completion(ctx->ctx, &ewq); 859 860 list_del(&ctx->list); 861 kfree(ctx); 862 } 863 } 864 865 static int userfaultfd_release(struct inode *inode, struct file *file) 866 { 867 struct userfaultfd_ctx *ctx = file->private_data; 868 struct mm_struct *mm = ctx->mm; 869 struct vm_area_struct *vma, *prev; 870 /* len == 0 means wake all */ 871 struct userfaultfd_wake_range range = { .len = 0, }; 872 unsigned long new_flags; 873 VMA_ITERATOR(vmi, mm, 0); 874 875 WRITE_ONCE(ctx->released, true); 876 877 if (!mmget_not_zero(mm)) 878 goto wakeup; 879 880 /* 881 * Flush page faults out of all CPUs. NOTE: all page faults 882 * must be retried without returning VM_FAULT_SIGBUS if 883 * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx 884 * changes while handle_userfault released the mmap_lock. So 885 * it's critical that released is set to true (above), before 886 * taking the mmap_lock for writing. 
	 */
	mmap_write_lock(mm);
	prev = NULL;
	for_each_vma(vmi, vma) {
		cond_resched();
		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
		       !!(vma->vm_flags & __VM_UFFD_FLAGS));
		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
			prev = vma;
			continue;
		}
		/* Reset ptes for the whole vma range if wr-protected */
		if (userfaultfd_wp(vma))
			uffd_wp_range(vma, vma->vm_start,
				      vma->vm_end - vma->vm_start, false);
		new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
		vma = vma_modify_flags_uffd(&vmi, prev, vma, vma->vm_start,
					    vma->vm_end, new_flags,
					    NULL_VM_UFFD_CTX);

		vma_start_write(vma);
		userfaultfd_set_vm_flags(vma, new_flags);
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;

		prev = vma;
	}
	mmap_write_unlock(mm);
	mmput(mm);
wakeup:
	/*
	 * After no new page faults can wait on this fault_*wqh, flush
	 * the last page faults that may have been already waiting on
	 * the fault_*wqh.
	 */
	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
	__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	/* Flush pending events that may still wait on event_wqh */
	wake_up_all(&ctx->event_wqh);

	wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
	userfaultfd_ctx_put(ctx);
	return 0;
}

/* fault_pending_wqh.lock must be held by the caller */
static inline struct userfaultfd_wait_queue *find_userfault_in(
		wait_queue_head_t *wqh)
{
	wait_queue_entry_t *wq;
	struct userfaultfd_wait_queue *uwq;

	lockdep_assert_held(&wqh->lock);

	uwq = NULL;
	if (!waitqueue_active(wqh))
		goto out;
	/* walk in reverse to provide FIFO behavior to read userfaults */
	wq = list_last_entry(&wqh->head, typeof(*wq), entry);
	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
out:
	return uwq;
}

static inline struct userfaultfd_wait_queue *find_userfault(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->fault_pending_wqh);
}

static inline struct userfaultfd_wait_queue *find_userfault_evt(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->event_wqh);
}

static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	__poll_t ret;

	poll_wait(file, &ctx->fd_wqh, wait);

	if (!userfaultfd_is_initialized(ctx))
		return EPOLLERR;

	/*
	 * poll() never guarantees that read won't block.
	 * userfaults can be woken before they're read().
	 */
	if (unlikely(!(file->f_flags & O_NONBLOCK)))
		return EPOLLERR;
	/*
	 * Lockless access to see if there are pending faults.
	 * __pollwait()'s last action is the add_wait_queue(), but the
	 * spin_unlock would allow the waitqueue_active() to pass above
	 * the actual list_add inside the add_wait_queue() critical
	 * section. So use a full memory barrier to serialize the
	 * list_add write of add_wait_queue() with the waitqueue_active()
	 * read below.
990 */ 991 ret = 0; 992 smp_mb(); 993 if (waitqueue_active(&ctx->fault_pending_wqh)) 994 ret = EPOLLIN; 995 else if (waitqueue_active(&ctx->event_wqh)) 996 ret = EPOLLIN; 997 998 return ret; 999 } 1000 1001 static const struct file_operations userfaultfd_fops; 1002 1003 static int resolve_userfault_fork(struct userfaultfd_ctx *new, 1004 struct inode *inode, 1005 struct uffd_msg *msg) 1006 { 1007 int fd; 1008 1009 fd = anon_inode_create_getfd("[userfaultfd]", &userfaultfd_fops, new, 1010 O_RDONLY | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode); 1011 if (fd < 0) 1012 return fd; 1013 1014 msg->arg.reserved.reserved1 = 0; 1015 msg->arg.fork.ufd = fd; 1016 return 0; 1017 } 1018 1019 static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, 1020 struct uffd_msg *msg, struct inode *inode) 1021 { 1022 ssize_t ret; 1023 DECLARE_WAITQUEUE(wait, current); 1024 struct userfaultfd_wait_queue *uwq; 1025 /* 1026 * Handling fork event requires sleeping operations, so 1027 * we drop the event_wqh lock, then do these ops, then 1028 * lock it back and wake up the waiter. While the lock is 1029 * dropped the ewq may go away so we keep track of it 1030 * carefully. 1031 */ 1032 LIST_HEAD(fork_event); 1033 struct userfaultfd_ctx *fork_nctx = NULL; 1034 1035 /* always take the fd_wqh lock before the fault_pending_wqh lock */ 1036 spin_lock_irq(&ctx->fd_wqh.lock); 1037 __add_wait_queue(&ctx->fd_wqh, &wait); 1038 for (;;) { 1039 set_current_state(TASK_INTERRUPTIBLE); 1040 spin_lock(&ctx->fault_pending_wqh.lock); 1041 uwq = find_userfault(ctx); 1042 if (uwq) { 1043 /* 1044 * Use a seqcount to repeat the lockless check 1045 * in wake_userfault() to avoid missing 1046 * wakeups because during the refile both 1047 * waitqueue could become empty if this is the 1048 * only userfault. 1049 */ 1050 write_seqcount_begin(&ctx->refile_seq); 1051 1052 /* 1053 * The fault_pending_wqh.lock prevents the uwq 1054 * to disappear from under us. 1055 * 1056 * Refile this userfault from 1057 * fault_pending_wqh to fault_wqh, it's not 1058 * pending anymore after we read it. 1059 * 1060 * Use list_del() by hand (as 1061 * userfaultfd_wake_function also uses 1062 * list_del_init() by hand) to be sure nobody 1063 * changes __remove_wait_queue() to use 1064 * list_del_init() in turn breaking the 1065 * !list_empty_careful() check in 1066 * handle_userfault(). The uwq->wq.head list 1067 * must never be empty at any time during the 1068 * refile, or the waitqueue could disappear 1069 * from under us. The "wait_queue_head_t" 1070 * parameter of __remove_wait_queue() is unused 1071 * anyway. 1072 */ 1073 list_del(&uwq->wq.entry); 1074 add_wait_queue(&ctx->fault_wqh, &uwq->wq); 1075 1076 write_seqcount_end(&ctx->refile_seq); 1077 1078 /* careful to always initialize msg if ret == 0 */ 1079 *msg = uwq->msg; 1080 spin_unlock(&ctx->fault_pending_wqh.lock); 1081 ret = 0; 1082 break; 1083 } 1084 spin_unlock(&ctx->fault_pending_wqh.lock); 1085 1086 spin_lock(&ctx->event_wqh.lock); 1087 uwq = find_userfault_evt(ctx); 1088 if (uwq) { 1089 *msg = uwq->msg; 1090 1091 if (uwq->msg.event == UFFD_EVENT_FORK) { 1092 fork_nctx = (struct userfaultfd_ctx *) 1093 (unsigned long) 1094 uwq->msg.arg.reserved.reserved1; 1095 list_move(&uwq->wq.entry, &fork_event); 1096 /* 1097 * fork_nctx can be freed as soon as 1098 * we drop the lock, unless we take a 1099 * reference on it. 
1100 */ 1101 userfaultfd_ctx_get(fork_nctx); 1102 spin_unlock(&ctx->event_wqh.lock); 1103 ret = 0; 1104 break; 1105 } 1106 1107 userfaultfd_event_complete(ctx, uwq); 1108 spin_unlock(&ctx->event_wqh.lock); 1109 ret = 0; 1110 break; 1111 } 1112 spin_unlock(&ctx->event_wqh.lock); 1113 1114 if (signal_pending(current)) { 1115 ret = -ERESTARTSYS; 1116 break; 1117 } 1118 if (no_wait) { 1119 ret = -EAGAIN; 1120 break; 1121 } 1122 spin_unlock_irq(&ctx->fd_wqh.lock); 1123 schedule(); 1124 spin_lock_irq(&ctx->fd_wqh.lock); 1125 } 1126 __remove_wait_queue(&ctx->fd_wqh, &wait); 1127 __set_current_state(TASK_RUNNING); 1128 spin_unlock_irq(&ctx->fd_wqh.lock); 1129 1130 if (!ret && msg->event == UFFD_EVENT_FORK) { 1131 ret = resolve_userfault_fork(fork_nctx, inode, msg); 1132 spin_lock_irq(&ctx->event_wqh.lock); 1133 if (!list_empty(&fork_event)) { 1134 /* 1135 * The fork thread didn't abort, so we can 1136 * drop the temporary refcount. 1137 */ 1138 userfaultfd_ctx_put(fork_nctx); 1139 1140 uwq = list_first_entry(&fork_event, 1141 typeof(*uwq), 1142 wq.entry); 1143 /* 1144 * If fork_event list wasn't empty and in turn 1145 * the event wasn't already released by fork 1146 * (the event is allocated on fork kernel 1147 * stack), put the event back to its place in 1148 * the event_wq. fork_event head will be freed 1149 * as soon as we return so the event cannot 1150 * stay queued there no matter the current 1151 * "ret" value. 1152 */ 1153 list_del(&uwq->wq.entry); 1154 __add_wait_queue(&ctx->event_wqh, &uwq->wq); 1155 1156 /* 1157 * Leave the event in the waitqueue and report 1158 * error to userland if we failed to resolve 1159 * the userfault fork. 1160 */ 1161 if (likely(!ret)) 1162 userfaultfd_event_complete(ctx, uwq); 1163 } else { 1164 /* 1165 * Here the fork thread aborted and the 1166 * refcount from the fork thread on fork_nctx 1167 * has already been released. We still hold 1168 * the reference we took before releasing the 1169 * lock above. If resolve_userfault_fork 1170 * failed we've to drop it because the 1171 * fork_nctx has to be freed in such case. If 1172 * it succeeded we'll hold it because the new 1173 * uffd references it. 1174 */ 1175 if (ret) 1176 userfaultfd_ctx_put(fork_nctx); 1177 } 1178 spin_unlock_irq(&ctx->event_wqh.lock); 1179 } 1180 1181 return ret; 1182 } 1183 1184 static ssize_t userfaultfd_read(struct file *file, char __user *buf, 1185 size_t count, loff_t *ppos) 1186 { 1187 struct userfaultfd_ctx *ctx = file->private_data; 1188 ssize_t _ret, ret = 0; 1189 struct uffd_msg msg; 1190 int no_wait = file->f_flags & O_NONBLOCK; 1191 struct inode *inode = file_inode(file); 1192 1193 if (!userfaultfd_is_initialized(ctx)) 1194 return -EINVAL; 1195 1196 for (;;) { 1197 if (count < sizeof(msg)) 1198 return ret ? ret : -EINVAL; 1199 _ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode); 1200 if (_ret < 0) 1201 return ret ? ret : _ret; 1202 if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg))) 1203 return ret ? ret : -EFAULT; 1204 ret += sizeof(msg); 1205 buf += sizeof(msg); 1206 count -= sizeof(msg); 1207 /* 1208 * Allow to read more than one fault at time but only 1209 * block if waiting for the very first one. 
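		 *
		 * (Editor's illustrative sketch, not kernel code: a monitor
		 * can drain the queue in batches simply by passing a buffer
		 * that holds several messages, e.g.
		 *
		 *	struct uffd_msg msgs[16];
		 *	ssize_t n = read(uffd, msgs, sizeof(msgs));
		 *
		 * and then handling n / sizeof(msgs[0]) messages; with
		 * O_NONBLOCK clear only the wait for the first message can
		 * block.)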
1210 */ 1211 no_wait = O_NONBLOCK; 1212 } 1213 } 1214 1215 static void __wake_userfault(struct userfaultfd_ctx *ctx, 1216 struct userfaultfd_wake_range *range) 1217 { 1218 spin_lock_irq(&ctx->fault_pending_wqh.lock); 1219 /* wake all in the range and autoremove */ 1220 if (waitqueue_active(&ctx->fault_pending_wqh)) 1221 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 1222 range); 1223 if (waitqueue_active(&ctx->fault_wqh)) 1224 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range); 1225 spin_unlock_irq(&ctx->fault_pending_wqh.lock); 1226 } 1227 1228 static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx, 1229 struct userfaultfd_wake_range *range) 1230 { 1231 unsigned seq; 1232 bool need_wakeup; 1233 1234 /* 1235 * To be sure waitqueue_active() is not reordered by the CPU 1236 * before the pagetable update, use an explicit SMP memory 1237 * barrier here. PT lock release or mmap_read_unlock(mm) still 1238 * have release semantics that can allow the 1239 * waitqueue_active() to be reordered before the pte update. 1240 */ 1241 smp_mb(); 1242 1243 /* 1244 * Use waitqueue_active because it's very frequent to 1245 * change the address space atomically even if there are no 1246 * userfaults yet. So we take the spinlock only when we're 1247 * sure we've userfaults to wake. 1248 */ 1249 do { 1250 seq = read_seqcount_begin(&ctx->refile_seq); 1251 need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) || 1252 waitqueue_active(&ctx->fault_wqh); 1253 cond_resched(); 1254 } while (read_seqcount_retry(&ctx->refile_seq, seq)); 1255 if (need_wakeup) 1256 __wake_userfault(ctx, range); 1257 } 1258 1259 static __always_inline int validate_unaligned_range( 1260 struct mm_struct *mm, __u64 start, __u64 len) 1261 { 1262 __u64 task_size = mm->task_size; 1263 1264 if (len & ~PAGE_MASK) 1265 return -EINVAL; 1266 if (!len) 1267 return -EINVAL; 1268 if (start < mmap_min_addr) 1269 return -EINVAL; 1270 if (start >= task_size) 1271 return -EINVAL; 1272 if (len > task_size - start) 1273 return -EINVAL; 1274 if (start + len <= start) 1275 return -EINVAL; 1276 return 0; 1277 } 1278 1279 static __always_inline int validate_range(struct mm_struct *mm, 1280 __u64 start, __u64 len) 1281 { 1282 if (start & ~PAGE_MASK) 1283 return -EINVAL; 1284 1285 return validate_unaligned_range(mm, start, len); 1286 } 1287 1288 static int userfaultfd_register(struct userfaultfd_ctx *ctx, 1289 unsigned long arg) 1290 { 1291 struct mm_struct *mm = ctx->mm; 1292 struct vm_area_struct *vma, *prev, *cur; 1293 int ret; 1294 struct uffdio_register uffdio_register; 1295 struct uffdio_register __user *user_uffdio_register; 1296 unsigned long vm_flags, new_flags; 1297 bool found; 1298 bool basic_ioctls; 1299 unsigned long start, end, vma_end; 1300 struct vma_iterator vmi; 1301 bool wp_async = userfaultfd_wp_async_ctx(ctx); 1302 1303 user_uffdio_register = (struct uffdio_register __user *) arg; 1304 1305 ret = -EFAULT; 1306 if (copy_from_user(&uffdio_register, user_uffdio_register, 1307 sizeof(uffdio_register)-sizeof(__u64))) 1308 goto out; 1309 1310 ret = -EINVAL; 1311 if (!uffdio_register.mode) 1312 goto out; 1313 if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES) 1314 goto out; 1315 vm_flags = 0; 1316 if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING) 1317 vm_flags |= VM_UFFD_MISSING; 1318 if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) { 1319 #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP 1320 goto out; 1321 #endif 1322 vm_flags |= VM_UFFD_WP; 1323 } 1324 if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) { 1325 
#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR 1326 goto out; 1327 #endif 1328 vm_flags |= VM_UFFD_MINOR; 1329 } 1330 1331 ret = validate_range(mm, uffdio_register.range.start, 1332 uffdio_register.range.len); 1333 if (ret) 1334 goto out; 1335 1336 start = uffdio_register.range.start; 1337 end = start + uffdio_register.range.len; 1338 1339 ret = -ENOMEM; 1340 if (!mmget_not_zero(mm)) 1341 goto out; 1342 1343 ret = -EINVAL; 1344 mmap_write_lock(mm); 1345 vma_iter_init(&vmi, mm, start); 1346 vma = vma_find(&vmi, end); 1347 if (!vma) 1348 goto out_unlock; 1349 1350 /* 1351 * If the first vma contains huge pages, make sure start address 1352 * is aligned to huge page size. 1353 */ 1354 if (is_vm_hugetlb_page(vma)) { 1355 unsigned long vma_hpagesize = vma_kernel_pagesize(vma); 1356 1357 if (start & (vma_hpagesize - 1)) 1358 goto out_unlock; 1359 } 1360 1361 /* 1362 * Search for not compatible vmas. 1363 */ 1364 found = false; 1365 basic_ioctls = false; 1366 cur = vma; 1367 do { 1368 cond_resched(); 1369 1370 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ 1371 !!(cur->vm_flags & __VM_UFFD_FLAGS)); 1372 1373 /* check not compatible vmas */ 1374 ret = -EINVAL; 1375 if (!vma_can_userfault(cur, vm_flags, wp_async)) 1376 goto out_unlock; 1377 1378 /* 1379 * UFFDIO_COPY will fill file holes even without 1380 * PROT_WRITE. This check enforces that if this is a 1381 * MAP_SHARED, the process has write permission to the backing 1382 * file. If VM_MAYWRITE is set it also enforces that on a 1383 * MAP_SHARED vma: there is no F_WRITE_SEAL and no further 1384 * F_WRITE_SEAL can be taken until the vma is destroyed. 1385 */ 1386 ret = -EPERM; 1387 if (unlikely(!(cur->vm_flags & VM_MAYWRITE))) 1388 goto out_unlock; 1389 1390 /* 1391 * If this vma contains ending address, and huge pages 1392 * check alignment. 1393 */ 1394 if (is_vm_hugetlb_page(cur) && end <= cur->vm_end && 1395 end > cur->vm_start) { 1396 unsigned long vma_hpagesize = vma_kernel_pagesize(cur); 1397 1398 ret = -EINVAL; 1399 1400 if (end & (vma_hpagesize - 1)) 1401 goto out_unlock; 1402 } 1403 if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE)) 1404 goto out_unlock; 1405 1406 /* 1407 * Check that this vma isn't already owned by a 1408 * different userfaultfd. We can't allow more than one 1409 * userfaultfd to own a single vma simultaneously or we 1410 * wouldn't know which one to deliver the userfaults to. 1411 */ 1412 ret = -EBUSY; 1413 if (cur->vm_userfaultfd_ctx.ctx && 1414 cur->vm_userfaultfd_ctx.ctx != ctx) 1415 goto out_unlock; 1416 1417 /* 1418 * Note vmas containing huge pages 1419 */ 1420 if (is_vm_hugetlb_page(cur)) 1421 basic_ioctls = true; 1422 1423 found = true; 1424 } for_each_vma_range(vmi, cur, end); 1425 BUG_ON(!found); 1426 1427 vma_iter_set(&vmi, start); 1428 prev = vma_prev(&vmi); 1429 if (vma->vm_start < start) 1430 prev = vma; 1431 1432 ret = 0; 1433 for_each_vma_range(vmi, vma, end) { 1434 cond_resched(); 1435 1436 BUG_ON(!vma_can_userfault(vma, vm_flags, wp_async)); 1437 BUG_ON(vma->vm_userfaultfd_ctx.ctx && 1438 vma->vm_userfaultfd_ctx.ctx != ctx); 1439 WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); 1440 1441 /* 1442 * Nothing to do: this vma is already registered into this 1443 * userfaultfd and with the right tracking mode too. 
1444 */ 1445 if (vma->vm_userfaultfd_ctx.ctx == ctx && 1446 (vma->vm_flags & vm_flags) == vm_flags) 1447 goto skip; 1448 1449 if (vma->vm_start > start) 1450 start = vma->vm_start; 1451 vma_end = min(end, vma->vm_end); 1452 1453 new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags; 1454 vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end, 1455 new_flags, 1456 (struct vm_userfaultfd_ctx){ctx}); 1457 if (IS_ERR(vma)) { 1458 ret = PTR_ERR(vma); 1459 break; 1460 } 1461 1462 /* 1463 * In the vma_merge() successful mprotect-like case 8: 1464 * the next vma was merged into the current one and 1465 * the current one has not been updated yet. 1466 */ 1467 vma_start_write(vma); 1468 userfaultfd_set_vm_flags(vma, new_flags); 1469 vma->vm_userfaultfd_ctx.ctx = ctx; 1470 1471 if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma)) 1472 hugetlb_unshare_all_pmds(vma); 1473 1474 skip: 1475 prev = vma; 1476 start = vma->vm_end; 1477 } 1478 1479 out_unlock: 1480 mmap_write_unlock(mm); 1481 mmput(mm); 1482 if (!ret) { 1483 __u64 ioctls_out; 1484 1485 ioctls_out = basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC : 1486 UFFD_API_RANGE_IOCTLS; 1487 1488 /* 1489 * Declare the WP ioctl only if the WP mode is 1490 * specified and all checks passed with the range 1491 */ 1492 if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP)) 1493 ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT); 1494 1495 /* CONTINUE ioctl is only supported for MINOR ranges. */ 1496 if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR)) 1497 ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE); 1498 1499 /* 1500 * Now that we scanned all vmas we can already tell 1501 * userland which ioctls methods are guaranteed to 1502 * succeed on this range. 1503 */ 1504 if (put_user(ioctls_out, &user_uffdio_register->ioctls)) 1505 ret = -EFAULT; 1506 } 1507 out: 1508 return ret; 1509 } 1510 1511 static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, 1512 unsigned long arg) 1513 { 1514 struct mm_struct *mm = ctx->mm; 1515 struct vm_area_struct *vma, *prev, *cur; 1516 int ret; 1517 struct uffdio_range uffdio_unregister; 1518 unsigned long new_flags; 1519 bool found; 1520 unsigned long start, end, vma_end; 1521 const void __user *buf = (void __user *)arg; 1522 struct vma_iterator vmi; 1523 bool wp_async = userfaultfd_wp_async_ctx(ctx); 1524 1525 ret = -EFAULT; 1526 if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister))) 1527 goto out; 1528 1529 ret = validate_range(mm, uffdio_unregister.start, 1530 uffdio_unregister.len); 1531 if (ret) 1532 goto out; 1533 1534 start = uffdio_unregister.start; 1535 end = start + uffdio_unregister.len; 1536 1537 ret = -ENOMEM; 1538 if (!mmget_not_zero(mm)) 1539 goto out; 1540 1541 mmap_write_lock(mm); 1542 ret = -EINVAL; 1543 vma_iter_init(&vmi, mm, start); 1544 vma = vma_find(&vmi, end); 1545 if (!vma) 1546 goto out_unlock; 1547 1548 /* 1549 * If the first vma contains huge pages, make sure start address 1550 * is aligned to huge page size. 1551 */ 1552 if (is_vm_hugetlb_page(vma)) { 1553 unsigned long vma_hpagesize = vma_kernel_pagesize(vma); 1554 1555 if (start & (vma_hpagesize - 1)) 1556 goto out_unlock; 1557 } 1558 1559 /* 1560 * Search for not compatible vmas. 
1561 */ 1562 found = false; 1563 cur = vma; 1564 do { 1565 cond_resched(); 1566 1567 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ 1568 !!(cur->vm_flags & __VM_UFFD_FLAGS)); 1569 1570 /* 1571 * Check not compatible vmas, not strictly required 1572 * here as not compatible vmas cannot have an 1573 * userfaultfd_ctx registered on them, but this 1574 * provides for more strict behavior to notice 1575 * unregistration errors. 1576 */ 1577 if (!vma_can_userfault(cur, cur->vm_flags, wp_async)) 1578 goto out_unlock; 1579 1580 found = true; 1581 } for_each_vma_range(vmi, cur, end); 1582 BUG_ON(!found); 1583 1584 vma_iter_set(&vmi, start); 1585 prev = vma_prev(&vmi); 1586 if (vma->vm_start < start) 1587 prev = vma; 1588 1589 ret = 0; 1590 for_each_vma_range(vmi, vma, end) { 1591 cond_resched(); 1592 1593 BUG_ON(!vma_can_userfault(vma, vma->vm_flags, wp_async)); 1594 1595 /* 1596 * Nothing to do: this vma is already registered into this 1597 * userfaultfd and with the right tracking mode too. 1598 */ 1599 if (!vma->vm_userfaultfd_ctx.ctx) 1600 goto skip; 1601 1602 WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); 1603 1604 if (vma->vm_start > start) 1605 start = vma->vm_start; 1606 vma_end = min(end, vma->vm_end); 1607 1608 if (userfaultfd_missing(vma)) { 1609 /* 1610 * Wake any concurrent pending userfault while 1611 * we unregister, so they will not hang 1612 * permanently and it avoids userland to call 1613 * UFFDIO_WAKE explicitly. 1614 */ 1615 struct userfaultfd_wake_range range; 1616 range.start = start; 1617 range.len = vma_end - start; 1618 wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range); 1619 } 1620 1621 /* Reset ptes for the whole vma range if wr-protected */ 1622 if (userfaultfd_wp(vma)) 1623 uffd_wp_range(vma, start, vma_end - start, false); 1624 1625 new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS; 1626 vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end, 1627 new_flags, NULL_VM_UFFD_CTX); 1628 if (IS_ERR(vma)) { 1629 ret = PTR_ERR(vma); 1630 break; 1631 } 1632 1633 /* 1634 * In the vma_merge() successful mprotect-like case 8: 1635 * the next vma was merged into the current one and 1636 * the current one has not been updated yet. 1637 */ 1638 vma_start_write(vma); 1639 userfaultfd_set_vm_flags(vma, new_flags); 1640 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 1641 1642 skip: 1643 prev = vma; 1644 start = vma->vm_end; 1645 } 1646 1647 out_unlock: 1648 mmap_write_unlock(mm); 1649 mmput(mm); 1650 out: 1651 return ret; 1652 } 1653 1654 /* 1655 * userfaultfd_wake may be used in combination with the 1656 * UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches. 1657 */ 1658 static int userfaultfd_wake(struct userfaultfd_ctx *ctx, 1659 unsigned long arg) 1660 { 1661 int ret; 1662 struct uffdio_range uffdio_wake; 1663 struct userfaultfd_wake_range range; 1664 const void __user *buf = (void __user *)arg; 1665 1666 ret = -EFAULT; 1667 if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake))) 1668 goto out; 1669 1670 ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len); 1671 if (ret) 1672 goto out; 1673 1674 range.start = uffdio_wake.start; 1675 range.len = uffdio_wake.len; 1676 1677 /* 1678 * len == 0 means wake all and we don't want to wake all here, 1679 * so check it again to be sure. 
1680 */ 1681 VM_BUG_ON(!range.len); 1682 1683 wake_userfault(ctx, &range); 1684 ret = 0; 1685 1686 out: 1687 return ret; 1688 } 1689 1690 static int userfaultfd_copy(struct userfaultfd_ctx *ctx, 1691 unsigned long arg) 1692 { 1693 __s64 ret; 1694 struct uffdio_copy uffdio_copy; 1695 struct uffdio_copy __user *user_uffdio_copy; 1696 struct userfaultfd_wake_range range; 1697 uffd_flags_t flags = 0; 1698 1699 user_uffdio_copy = (struct uffdio_copy __user *) arg; 1700 1701 ret = -EAGAIN; 1702 if (atomic_read(&ctx->mmap_changing)) 1703 goto out; 1704 1705 ret = -EFAULT; 1706 if (copy_from_user(&uffdio_copy, user_uffdio_copy, 1707 /* don't copy "copy" last field */ 1708 sizeof(uffdio_copy)-sizeof(__s64))) 1709 goto out; 1710 1711 ret = validate_unaligned_range(ctx->mm, uffdio_copy.src, 1712 uffdio_copy.len); 1713 if (ret) 1714 goto out; 1715 ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len); 1716 if (ret) 1717 goto out; 1718 1719 ret = -EINVAL; 1720 if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP)) 1721 goto out; 1722 if (uffdio_copy.mode & UFFDIO_COPY_MODE_WP) 1723 flags |= MFILL_ATOMIC_WP; 1724 if (mmget_not_zero(ctx->mm)) { 1725 ret = mfill_atomic_copy(ctx, uffdio_copy.dst, uffdio_copy.src, 1726 uffdio_copy.len, flags); 1727 mmput(ctx->mm); 1728 } else { 1729 return -ESRCH; 1730 } 1731 if (unlikely(put_user(ret, &user_uffdio_copy->copy))) 1732 return -EFAULT; 1733 if (ret < 0) 1734 goto out; 1735 BUG_ON(!ret); 1736 /* len == 0 would wake all */ 1737 range.len = ret; 1738 if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) { 1739 range.start = uffdio_copy.dst; 1740 wake_userfault(ctx, &range); 1741 } 1742 ret = range.len == uffdio_copy.len ? 0 : -EAGAIN; 1743 out: 1744 return ret; 1745 } 1746 1747 static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx, 1748 unsigned long arg) 1749 { 1750 __s64 ret; 1751 struct uffdio_zeropage uffdio_zeropage; 1752 struct uffdio_zeropage __user *user_uffdio_zeropage; 1753 struct userfaultfd_wake_range range; 1754 1755 user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg; 1756 1757 ret = -EAGAIN; 1758 if (atomic_read(&ctx->mmap_changing)) 1759 goto out; 1760 1761 ret = -EFAULT; 1762 if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage, 1763 /* don't copy "zeropage" last field */ 1764 sizeof(uffdio_zeropage)-sizeof(__s64))) 1765 goto out; 1766 1767 ret = validate_range(ctx->mm, uffdio_zeropage.range.start, 1768 uffdio_zeropage.range.len); 1769 if (ret) 1770 goto out; 1771 ret = -EINVAL; 1772 if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE) 1773 goto out; 1774 1775 if (mmget_not_zero(ctx->mm)) { 1776 ret = mfill_atomic_zeropage(ctx, uffdio_zeropage.range.start, 1777 uffdio_zeropage.range.len); 1778 mmput(ctx->mm); 1779 } else { 1780 return -ESRCH; 1781 } 1782 if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage))) 1783 return -EFAULT; 1784 if (ret < 0) 1785 goto out; 1786 /* len == 0 would wake all */ 1787 BUG_ON(!ret); 1788 range.len = ret; 1789 if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) { 1790 range.start = uffdio_zeropage.range.start; 1791 wake_userfault(ctx, &range); 1792 } 1793 ret = range.len == uffdio_zeropage.range.len ? 
0 : -EAGAIN; 1794 out: 1795 return ret; 1796 } 1797 1798 static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx, 1799 unsigned long arg) 1800 { 1801 int ret; 1802 struct uffdio_writeprotect uffdio_wp; 1803 struct uffdio_writeprotect __user *user_uffdio_wp; 1804 struct userfaultfd_wake_range range; 1805 bool mode_wp, mode_dontwake; 1806 1807 if (atomic_read(&ctx->mmap_changing)) 1808 return -EAGAIN; 1809 1810 user_uffdio_wp = (struct uffdio_writeprotect __user *) arg; 1811 1812 if (copy_from_user(&uffdio_wp, user_uffdio_wp, 1813 sizeof(struct uffdio_writeprotect))) 1814 return -EFAULT; 1815 1816 ret = validate_range(ctx->mm, uffdio_wp.range.start, 1817 uffdio_wp.range.len); 1818 if (ret) 1819 return ret; 1820 1821 if (uffdio_wp.mode & ~(UFFDIO_WRITEPROTECT_MODE_DONTWAKE | 1822 UFFDIO_WRITEPROTECT_MODE_WP)) 1823 return -EINVAL; 1824 1825 mode_wp = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP; 1826 mode_dontwake = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_DONTWAKE; 1827 1828 if (mode_wp && mode_dontwake) 1829 return -EINVAL; 1830 1831 if (mmget_not_zero(ctx->mm)) { 1832 ret = mwriteprotect_range(ctx, uffdio_wp.range.start, 1833 uffdio_wp.range.len, mode_wp); 1834 mmput(ctx->mm); 1835 } else { 1836 return -ESRCH; 1837 } 1838 1839 if (ret) 1840 return ret; 1841 1842 if (!mode_wp && !mode_dontwake) { 1843 range.start = uffdio_wp.range.start; 1844 range.len = uffdio_wp.range.len; 1845 wake_userfault(ctx, &range); 1846 } 1847 return ret; 1848 } 1849 1850 static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg) 1851 { 1852 __s64 ret; 1853 struct uffdio_continue uffdio_continue; 1854 struct uffdio_continue __user *user_uffdio_continue; 1855 struct userfaultfd_wake_range range; 1856 uffd_flags_t flags = 0; 1857 1858 user_uffdio_continue = (struct uffdio_continue __user *)arg; 1859 1860 ret = -EAGAIN; 1861 if (atomic_read(&ctx->mmap_changing)) 1862 goto out; 1863 1864 ret = -EFAULT; 1865 if (copy_from_user(&uffdio_continue, user_uffdio_continue, 1866 /* don't copy the output fields */ 1867 sizeof(uffdio_continue) - (sizeof(__s64)))) 1868 goto out; 1869 1870 ret = validate_range(ctx->mm, uffdio_continue.range.start, 1871 uffdio_continue.range.len); 1872 if (ret) 1873 goto out; 1874 1875 ret = -EINVAL; 1876 if (uffdio_continue.mode & ~(UFFDIO_CONTINUE_MODE_DONTWAKE | 1877 UFFDIO_CONTINUE_MODE_WP)) 1878 goto out; 1879 if (uffdio_continue.mode & UFFDIO_CONTINUE_MODE_WP) 1880 flags |= MFILL_ATOMIC_WP; 1881 1882 if (mmget_not_zero(ctx->mm)) { 1883 ret = mfill_atomic_continue(ctx, uffdio_continue.range.start, 1884 uffdio_continue.range.len, flags); 1885 mmput(ctx->mm); 1886 } else { 1887 return -ESRCH; 1888 } 1889 1890 if (unlikely(put_user(ret, &user_uffdio_continue->mapped))) 1891 return -EFAULT; 1892 if (ret < 0) 1893 goto out; 1894 1895 /* len == 0 would wake all */ 1896 BUG_ON(!ret); 1897 range.len = ret; 1898 if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) { 1899 range.start = uffdio_continue.range.start; 1900 wake_userfault(ctx, &range); 1901 } 1902 ret = range.len == uffdio_continue.range.len ? 
0 : -EAGAIN; 1903 1904 out: 1905 return ret; 1906 } 1907 1908 static inline int userfaultfd_poison(struct userfaultfd_ctx *ctx, unsigned long arg) 1909 { 1910 __s64 ret; 1911 struct uffdio_poison uffdio_poison; 1912 struct uffdio_poison __user *user_uffdio_poison; 1913 struct userfaultfd_wake_range range; 1914 1915 user_uffdio_poison = (struct uffdio_poison __user *)arg; 1916 1917 ret = -EAGAIN; 1918 if (atomic_read(&ctx->mmap_changing)) 1919 goto out; 1920 1921 ret = -EFAULT; 1922 if (copy_from_user(&uffdio_poison, user_uffdio_poison, 1923 /* don't copy the output fields */ 1924 sizeof(uffdio_poison) - (sizeof(__s64)))) 1925 goto out; 1926 1927 ret = validate_range(ctx->mm, uffdio_poison.range.start, 1928 uffdio_poison.range.len); 1929 if (ret) 1930 goto out; 1931 1932 ret = -EINVAL; 1933 if (uffdio_poison.mode & ~UFFDIO_POISON_MODE_DONTWAKE) 1934 goto out; 1935 1936 if (mmget_not_zero(ctx->mm)) { 1937 ret = mfill_atomic_poison(ctx, uffdio_poison.range.start, 1938 uffdio_poison.range.len, 0); 1939 mmput(ctx->mm); 1940 } else { 1941 return -ESRCH; 1942 } 1943 1944 if (unlikely(put_user(ret, &user_uffdio_poison->updated))) 1945 return -EFAULT; 1946 if (ret < 0) 1947 goto out; 1948 1949 /* len == 0 would wake all */ 1950 BUG_ON(!ret); 1951 range.len = ret; 1952 if (!(uffdio_poison.mode & UFFDIO_POISON_MODE_DONTWAKE)) { 1953 range.start = uffdio_poison.range.start; 1954 wake_userfault(ctx, &range); 1955 } 1956 ret = range.len == uffdio_poison.range.len ? 0 : -EAGAIN; 1957 1958 out: 1959 return ret; 1960 } 1961 1962 bool userfaultfd_wp_async(struct vm_area_struct *vma) 1963 { 1964 return userfaultfd_wp_async_ctx(vma->vm_userfaultfd_ctx.ctx); 1965 } 1966 1967 static inline unsigned int uffd_ctx_features(__u64 user_features) 1968 { 1969 /* 1970 * For the current set of features the bits just coincide. Set 1971 * UFFD_FEATURE_INITIALIZED to mark the features as enabled. 1972 */ 1973 return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED; 1974 } 1975 1976 static int userfaultfd_move(struct userfaultfd_ctx *ctx, 1977 unsigned long arg) 1978 { 1979 __s64 ret; 1980 struct uffdio_move uffdio_move; 1981 struct uffdio_move __user *user_uffdio_move; 1982 struct userfaultfd_wake_range range; 1983 struct mm_struct *mm = ctx->mm; 1984 1985 user_uffdio_move = (struct uffdio_move __user *) arg; 1986 1987 if (atomic_read(&ctx->mmap_changing)) 1988 return -EAGAIN; 1989 1990 if (copy_from_user(&uffdio_move, user_uffdio_move, 1991 /* don't copy "move" last field */ 1992 sizeof(uffdio_move)-sizeof(__s64))) 1993 return -EFAULT; 1994 1995 /* Do not allow cross-mm moves. */ 1996 if (mm != current->mm) 1997 return -EINVAL; 1998 1999 ret = validate_range(mm, uffdio_move.dst, uffdio_move.len); 2000 if (ret) 2001 return ret; 2002 2003 ret = validate_range(mm, uffdio_move.src, uffdio_move.len); 2004 if (ret) 2005 return ret; 2006 2007 if (uffdio_move.mode & ~(UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES| 2008 UFFDIO_MOVE_MODE_DONTWAKE)) 2009 return -EINVAL; 2010 2011 if (mmget_not_zero(mm)) { 2012 ret = move_pages(ctx, uffdio_move.dst, uffdio_move.src, 2013 uffdio_move.len, uffdio_move.mode); 2014 mmput(mm); 2015 } else { 2016 return -ESRCH; 2017 } 2018 2019 if (unlikely(put_user(ret, &user_uffdio_move->move))) 2020 return -EFAULT; 2021 if (ret < 0) 2022 goto out; 2023 2024 /* len == 0 would wake all */ 2025 VM_WARN_ON(!ret); 2026 range.len = ret; 2027 if (!(uffdio_move.mode & UFFDIO_MOVE_MODE_DONTWAKE)) { 2028 range.start = uffdio_move.dst; 2029 wake_userfault(ctx, &range); 2030 } 2031 ret = range.len == uffdio_move.len ? 

out:
        return ret;
}

/*
 * userland asks for a certain API version and we return which feature
 * bits and ioctl commands are implemented in this kernel for that API
 * version, or -EINVAL if the API version is unknown.
 */
static int userfaultfd_api(struct userfaultfd_ctx *ctx,
                           unsigned long arg)
{
        struct uffdio_api uffdio_api;
        void __user *buf = (void __user *)arg;
        unsigned int ctx_features;
        int ret;
        __u64 features;

        ret = -EFAULT;
        if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
                goto out;
        features = uffdio_api.features;
        ret = -EINVAL;
        if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
                goto err_out;
        ret = -EPERM;
        if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
                goto err_out;

        /* WP_ASYNC relies on WP_UNPOPULATED, choose it unconditionally */
        if (features & UFFD_FEATURE_WP_ASYNC)
                features |= UFFD_FEATURE_WP_UNPOPULATED;

        /* report all available features and ioctls to userland */
        uffdio_api.features = UFFD_API_FEATURES;
#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
        uffdio_api.features &=
                ~(UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM);
#endif
#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
        uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP;
#endif
#ifndef CONFIG_PTE_MARKER_UFFD_WP
        uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM;
        uffdio_api.features &= ~UFFD_FEATURE_WP_UNPOPULATED;
        uffdio_api.features &= ~UFFD_FEATURE_WP_ASYNC;
#endif
        uffdio_api.ioctls = UFFD_API_IOCTLS;
        ret = -EFAULT;
        if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
                goto out;

        /* only enable the requested features for this uffd context */
        ctx_features = uffd_ctx_features(features);
        ret = -EINVAL;
        if (cmpxchg(&ctx->features, 0, ctx_features) != 0)
                goto err_out;

        ret = 0;
out:
        return ret;
err_out:
        memset(&uffdio_api, 0, sizeof(uffdio_api));
        if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
                ret = -EFAULT;
        goto out;
}

static long userfaultfd_ioctl(struct file *file, unsigned cmd,
                              unsigned long arg)
{
        int ret = -EINVAL;
        struct userfaultfd_ctx *ctx = file->private_data;

        if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx))
                return -EINVAL;

        switch(cmd) {
        case UFFDIO_API:
                ret = userfaultfd_api(ctx, arg);
                break;
        case UFFDIO_REGISTER:
                ret = userfaultfd_register(ctx, arg);
                break;
        case UFFDIO_UNREGISTER:
                ret = userfaultfd_unregister(ctx, arg);
                break;
        case UFFDIO_WAKE:
                ret = userfaultfd_wake(ctx, arg);
                break;
        case UFFDIO_COPY:
                ret = userfaultfd_copy(ctx, arg);
                break;
        case UFFDIO_ZEROPAGE:
                ret = userfaultfd_zeropage(ctx, arg);
                break;
        case UFFDIO_MOVE:
                ret = userfaultfd_move(ctx, arg);
                break;
        case UFFDIO_WRITEPROTECT:
                ret = userfaultfd_writeprotect(ctx, arg);
                break;
        case UFFDIO_CONTINUE:
                ret = userfaultfd_continue(ctx, arg);
                break;
        case UFFDIO_POISON:
                ret = userfaultfd_poison(ctx, arg);
                break;
        }
        return ret;
}
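
/*
 * Illustrative userspace sketch (not compiled as part of this file) of the
 * UFFDIO_API handshake implemented by userfaultfd_api() above.  The fd name
 * and the choice to request no optional features are assumptions made for
 * the example only; see Documentation/admin-guide/mm/userfaultfd.rst for
 * the full uAPI description.
 *
 *	struct uffdio_api api = {
 *		.api = UFFD_API,
 *		.features = 0,		// request no optional features
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_API, &api))
 *		err(1, "UFFDIO_API");
 *	// on success, api.features and api.ioctls report what this kernel
 *	// supports; every other ioctl requires this handshake first
 */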

#ifdef CONFIG_PROC_FS
static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
{
        struct userfaultfd_ctx *ctx = f->private_data;
        wait_queue_entry_t *wq;
        unsigned long pending = 0, total = 0;

        spin_lock_irq(&ctx->fault_pending_wqh.lock);
        list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
                pending++;
                total++;
        }
        list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
                total++;
        }
        spin_unlock_irq(&ctx->fault_pending_wqh.lock);

        /*
         * If more protocols are added in the future, they will all be
         * shown here separated by a space, like this:
         *	protocols: aa:... bb:...
         */
        seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
                   pending, total, UFFD_API, ctx->features,
                   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
}
#endif

static const struct file_operations userfaultfd_fops = {
#ifdef CONFIG_PROC_FS
        .show_fdinfo    = userfaultfd_show_fdinfo,
#endif
        .release        = userfaultfd_release,
        .poll           = userfaultfd_poll,
        .read           = userfaultfd_read,
        .unlocked_ioctl = userfaultfd_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
        .llseek         = noop_llseek,
};

static void init_once_userfaultfd_ctx(void *mem)
{
        struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;

        init_waitqueue_head(&ctx->fault_pending_wqh);
        init_waitqueue_head(&ctx->fault_wqh);
        init_waitqueue_head(&ctx->event_wqh);
        init_waitqueue_head(&ctx->fd_wqh);
        seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock);
}

static int new_userfaultfd(int flags)
{
        struct userfaultfd_ctx *ctx;
        int fd;

        BUG_ON(!current->mm);

        /* Check the UFFD_* constants for consistency. */
        BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UFFD_SHARED_FCNTL_FLAGS);
        BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
        BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);

        if (flags & ~(UFFD_SHARED_FCNTL_FLAGS | UFFD_USER_MODE_ONLY))
                return -EINVAL;

        ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        refcount_set(&ctx->refcount, 1);
        ctx->flags = flags;
        ctx->features = 0;
        ctx->released = false;
        init_rwsem(&ctx->map_changing_lock);
        atomic_set(&ctx->mmap_changing, 0);
        ctx->mm = current->mm;
        /* prevent the mm struct from being freed */
        mmgrab(ctx->mm);

        /* Create a new inode so that the LSM can block the creation. */
        fd = anon_inode_create_getfd("[userfaultfd]", &userfaultfd_fops, ctx,
                        O_RDONLY | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL);
        if (fd < 0) {
                mmdrop(ctx->mm);
                kmem_cache_free(userfaultfd_ctx_cachep, ctx);
        }
        return fd;
}

static inline bool userfaultfd_syscall_allowed(int flags)
{
        /* Userspace-only page faults are always allowed */
        if (flags & UFFD_USER_MODE_ONLY)
                return true;

        /*
         * The user is requesting a userfaultfd which can handle kernel faults.
         * Privileged users are always allowed to do this.
         */
        if (capable(CAP_SYS_PTRACE))
                return true;

        /* Otherwise, access to kernel fault handling is sysctl controlled. */
        return sysctl_unprivileged_userfaultfd;
}
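
/*
 * Illustrative userspace sketch (not compiled as part of this file) of the
 * two creation paths that end up in new_userfaultfd() above: the userfaultfd
 * syscall, gated by userfaultfd_syscall_allowed(), and the /dev/userfaultfd
 * miscdevice registered below, which relies on file permissions instead of
 * the sysctl/CAP_SYS_PTRACE checks.  The open mode and flag choices are
 * assumptions made for the example only; error handling is elided.
 *
 *	// path 1: the syscall (subject to vm.unprivileged_userfaultfd unless
 *	// UFFD_USER_MODE_ONLY is passed or the caller has CAP_SYS_PTRACE)
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *
 *	// path 2: the character device
 *	int dev = open("/dev/userfaultfd", O_RDWR | O_CLOEXEC);
 *	int uffd2 = ioctl(dev, USERFAULTFD_IOC_NEW, O_CLOEXEC | O_NONBLOCK);
 */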

SYSCALL_DEFINE1(userfaultfd, int, flags)
{
        if (!userfaultfd_syscall_allowed(flags))
                return -EPERM;

        return new_userfaultfd(flags);
}

static long userfaultfd_dev_ioctl(struct file *file, unsigned int cmd, unsigned long flags)
{
        if (cmd != USERFAULTFD_IOC_NEW)
                return -EINVAL;

        return new_userfaultfd(flags);
}

static const struct file_operations userfaultfd_dev_fops = {
        .unlocked_ioctl = userfaultfd_dev_ioctl,
        .compat_ioctl = userfaultfd_dev_ioctl,
        .owner = THIS_MODULE,
        .llseek = noop_llseek,
};

static struct miscdevice userfaultfd_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "userfaultfd",
        .fops = &userfaultfd_dev_fops
};

static int __init userfaultfd_init(void)
{
        int ret;

        ret = misc_register(&userfaultfd_misc);
        if (ret)
                return ret;

        userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
                                                sizeof(struct userfaultfd_ctx),
                                                0,
                                                SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                                init_once_userfaultfd_ctx);
#ifdef CONFIG_SYSCTL
        register_sysctl_init("vm", vm_userfaultfd_table);
#endif
        return 0;
}
__initcall(userfaultfd_init);
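
/*
 * Illustrative end-to-end userspace sketch (not compiled as part of this
 * file) tying the ioctls above together: register a range, read one fault
 * message from the fd and resolve it with UFFDIO_COPY.  The names area, len,
 * src_page and page_size, and the missing-fault-only policy, are assumptions
 * made for the example only; error handling is elided.
 *
 *	struct uffdio_register reg = {
 *		.range = { .start = (unsigned long)area, .len = len },
 *		.mode = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *	ioctl(uffd, UFFDIO_REGISTER, &reg);
 *
 *	struct uffd_msg msg;
 *	read(uffd, &msg, sizeof(msg));	// blocks until a fault is pending
 *	if (msg.event == UFFD_EVENT_PAGEFAULT) {
 *		struct uffdio_copy copy = {
 *			.dst = msg.arg.pagefault.address & ~(page_size - 1),
 *			.src = (unsigned long)src_page,
 *			.len = page_size,
 *			.mode = 0,
 *		};
 *		ioctl(uffd, UFFDIO_COPY, &copy);	// wakes the faulting thread
 *	}
 */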