xref: /linux/fs/userfaultfd.c (revision 26b433d0da062d6e19d75350c0171d3cf8ff560d)
1 /*
2  *  fs/userfaultfd.c
3  *
4  *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
5  *  Copyright (C) 2008-2009 Red Hat, Inc.
6  *  Copyright (C) 2015  Red Hat, Inc.
7  *
8  *  This work is licensed under the terms of the GNU GPL, version 2. See
9  *  the COPYING file in the top-level directory.
10  *
11  *  Some part derived from fs/eventfd.c (anon inode setup) and
12  *  mm/ksm.c (mm hashing).
13  */
14 
15 #include <linux/list.h>
16 #include <linux/hashtable.h>
17 #include <linux/sched/signal.h>
18 #include <linux/sched/mm.h>
19 #include <linux/mm.h>
20 #include <linux/poll.h>
21 #include <linux/slab.h>
22 #include <linux/seq_file.h>
23 #include <linux/file.h>
24 #include <linux/bug.h>
25 #include <linux/anon_inodes.h>
26 #include <linux/syscalls.h>
27 #include <linux/userfaultfd_k.h>
28 #include <linux/mempolicy.h>
29 #include <linux/ioctl.h>
30 #include <linux/security.h>
31 #include <linux/hugetlb.h>
32 
33 static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;
34 
35 enum userfaultfd_state {
36 	UFFD_STATE_WAIT_API,
37 	UFFD_STATE_RUNNING,
38 };
39 
40 /*
41  * Start with fault_pending_wqh and fault_wqh so they're more likely
42  * to be in the same cacheline.
43  */
44 struct userfaultfd_ctx {
45 	/* waitqueue head for the pending (i.e. not read) userfaults */
46 	wait_queue_head_t fault_pending_wqh;
47 	/* waitqueue head for the userfaults */
48 	wait_queue_head_t fault_wqh;
49 	/* waitqueue head for the pseudo fd to wakeup poll/read */
50 	wait_queue_head_t fd_wqh;
51 	/* waitqueue head for events */
52 	wait_queue_head_t event_wqh;
53 	/* a refile sequence protected by fault_pending_wqh lock */
54 	struct seqcount refile_seq;
55 	/* pseudo fd refcounting */
56 	atomic_t refcount;
57 	/* userfaultfd syscall flags */
58 	unsigned int flags;
59 	/* features requested from the userspace */
60 	unsigned int features;
61 	/* state machine */
62 	enum userfaultfd_state state;
63 	/* released */
64 	bool released;
65 	/* mm with one ore more vmas attached to this userfaultfd_ctx */
66 	struct mm_struct *mm;
67 };
68 
69 struct userfaultfd_fork_ctx {
70 	struct userfaultfd_ctx *orig;
71 	struct userfaultfd_ctx *new;
72 	struct list_head list;
73 };
74 
75 struct userfaultfd_unmap_ctx {
76 	struct userfaultfd_ctx *ctx;
77 	unsigned long start;
78 	unsigned long end;
79 	struct list_head list;
80 };
81 
82 struct userfaultfd_wait_queue {
83 	struct uffd_msg msg;
84 	wait_queue_entry_t wq;
85 	struct userfaultfd_ctx *ctx;
86 	bool waken;
87 };
88 
89 struct userfaultfd_wake_range {
90 	unsigned long start;
91 	unsigned long len;
92 };
93 
94 static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
95 				     int wake_flags, void *key)
96 {
97 	struct userfaultfd_wake_range *range = key;
98 	int ret;
99 	struct userfaultfd_wait_queue *uwq;
100 	unsigned long start, len;
101 
102 	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
103 	ret = 0;
104 	/* len == 0 means wake all */
105 	start = range->start;
106 	len = range->len;
107 	if (len && (start > uwq->msg.arg.pagefault.address ||
108 		    start + len <= uwq->msg.arg.pagefault.address))
109 		goto out;
110 	WRITE_ONCE(uwq->waken, true);
111 	/*
112 	 * The Program-Order guarantees provided by the scheduler
113 	 * ensure uwq->waken is visible before the task is woken.
114 	 */
115 	ret = wake_up_state(wq->private, mode);
116 	if (ret) {
117 		/*
118 		 * Wake only once, autoremove behavior.
119 		 *
120 		 * After the effect of list_del_init is visible to the other
121 		 * CPUs, the waitqueue may disappear from under us, see the
122 		 * !list_empty_careful() in handle_userfault().
123 		 *
124 		 * try_to_wake_up() has an implicit smp_mb(), and the
125 		 * wq->private is read before calling the extern function
126 		 * "wake_up_state" (which in turns calls try_to_wake_up).
127 		 */
128 		list_del_init(&wq->entry);
129 	}
130 out:
131 	return ret;
132 }
133 
134 /**
135  * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
136  * context.
137  * @ctx: [in] Pointer to the userfaultfd context.
138  */
139 static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
140 {
141 	if (!atomic_inc_not_zero(&ctx->refcount))
142 		BUG();
143 }
144 
145 /**
146  * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
147  * context.
148  * @ctx: [in] Pointer to userfaultfd context.
149  *
150  * The userfaultfd context reference must have been previously acquired either
151  * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
152  */
153 static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
154 {
155 	if (atomic_dec_and_test(&ctx->refcount)) {
156 		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
157 		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
158 		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
159 		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
160 		VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
161 		VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
162 		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
163 		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
164 		mmdrop(ctx->mm);
165 		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
166 	}
167 }
168 
169 static inline void msg_init(struct uffd_msg *msg)
170 {
171 	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
172 	/*
173 	 * Must use memset to zero out the paddings or kernel data is
174 	 * leaked to userland.
175 	 */
176 	memset(msg, 0, sizeof(struct uffd_msg));
177 }
178 
179 static inline struct uffd_msg userfault_msg(unsigned long address,
180 					    unsigned int flags,
181 					    unsigned long reason)
182 {
183 	struct uffd_msg msg;
184 	msg_init(&msg);
185 	msg.event = UFFD_EVENT_PAGEFAULT;
186 	msg.arg.pagefault.address = address;
187 	if (flags & FAULT_FLAG_WRITE)
188 		/*
189 		 * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the
190 		 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WRITE
191 		 * was not set in a UFFD_EVENT_PAGEFAULT, it means it
192 		 * was a read fault, otherwise if set it means it's
193 		 * a write fault.
194 		 */
195 		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
196 	if (reason & VM_UFFD_WP)
197 		/*
198 		 * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the
199 		 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WP was
200 		 * not set in a UFFD_EVENT_PAGEFAULT, it means it was
201 		 * a missing fault, otherwise if set it means it's a
202 		 * write protect fault.
203 		 */
204 		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
205 	return msg;
206 }
207 
208 #ifdef CONFIG_HUGETLB_PAGE
209 /*
210  * Same functionality as userfaultfd_must_wait below with modifications for
211  * hugepmd ranges.
212  */
213 static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
214 					 struct vm_area_struct *vma,
215 					 unsigned long address,
216 					 unsigned long flags,
217 					 unsigned long reason)
218 {
219 	struct mm_struct *mm = ctx->mm;
220 	pte_t *pte;
221 	bool ret = true;
222 
223 	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
224 
225 	pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
226 	if (!pte)
227 		goto out;
228 
229 	ret = false;
230 
231 	/*
232 	 * Lockless access: we're in a wait_event so it's ok if it
233 	 * changes under us.
234 	 */
235 	if (huge_pte_none(*pte))
236 		ret = true;
237 	if (!huge_pte_write(*pte) && (reason & VM_UFFD_WP))
238 		ret = true;
239 out:
240 	return ret;
241 }
242 #else
243 static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
244 					 struct vm_area_struct *vma,
245 					 unsigned long address,
246 					 unsigned long flags,
247 					 unsigned long reason)
248 {
249 	return false;	/* should never get here */
250 }
251 #endif /* CONFIG_HUGETLB_PAGE */
252 
253 /*
254  * Verify the pagetables are still not ok after having reigstered into
255  * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
256  * userfault that has already been resolved, if userfaultfd_read and
257  * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
258  * threads.
259  */
260 static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
261 					 unsigned long address,
262 					 unsigned long flags,
263 					 unsigned long reason)
264 {
265 	struct mm_struct *mm = ctx->mm;
266 	pgd_t *pgd;
267 	p4d_t *p4d;
268 	pud_t *pud;
269 	pmd_t *pmd, _pmd;
270 	pte_t *pte;
271 	bool ret = true;
272 
273 	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
274 
275 	pgd = pgd_offset(mm, address);
276 	if (!pgd_present(*pgd))
277 		goto out;
278 	p4d = p4d_offset(pgd, address);
279 	if (!p4d_present(*p4d))
280 		goto out;
281 	pud = pud_offset(p4d, address);
282 	if (!pud_present(*pud))
283 		goto out;
284 	pmd = pmd_offset(pud, address);
285 	/*
286 	 * READ_ONCE must function as a barrier with narrower scope
287 	 * and it must be equivalent to:
288 	 *	_pmd = *pmd; barrier();
289 	 *
290 	 * This is to deal with the instability (as in
291 	 * pmd_trans_unstable) of the pmd.
292 	 */
293 	_pmd = READ_ONCE(*pmd);
294 	if (!pmd_present(_pmd))
295 		goto out;
296 
297 	ret = false;
298 	if (pmd_trans_huge(_pmd))
299 		goto out;
300 
301 	/*
302 	 * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
303 	 * and use the standard pte_offset_map() instead of parsing _pmd.
304 	 */
305 	pte = pte_offset_map(pmd, address);
306 	/*
307 	 * Lockless access: we're in a wait_event so it's ok if it
308 	 * changes under us.
309 	 */
310 	if (pte_none(*pte))
311 		ret = true;
312 	pte_unmap(pte);
313 
314 out:
315 	return ret;
316 }
317 
318 /*
319  * The locking rules involved in returning VM_FAULT_RETRY depending on
320  * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
321  * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
322  * recommendation in __lock_page_or_retry is not an understatement.
323  *
324  * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_sem must be released
325  * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
326  * not set.
327  *
328  * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
329  * set, VM_FAULT_RETRY can still be returned if and only if there are
330  * fatal_signal_pending()s, and the mmap_sem must be released before
331  * returning it.
332  */
333 int handle_userfault(struct vm_fault *vmf, unsigned long reason)
334 {
335 	struct mm_struct *mm = vmf->vma->vm_mm;
336 	struct userfaultfd_ctx *ctx;
337 	struct userfaultfd_wait_queue uwq;
338 	int ret;
339 	bool must_wait, return_to_userland;
340 	long blocking_state;
341 
342 	ret = VM_FAULT_SIGBUS;
343 
344 	/*
345 	 * We don't do userfault handling for the final child pid update.
346 	 *
347 	 * We also don't do userfault handling during
348 	 * coredumping. hugetlbfs has the special
349 	 * follow_hugetlb_page() to skip missing pages in the
350 	 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
351 	 * the no_page_table() helper in follow_page_mask(), but the
352 	 * shmem_vm_ops->fault method is invoked even during
353 	 * coredumping without mmap_sem and it ends up here.
354 	 */
355 	if (current->flags & (PF_EXITING|PF_DUMPCORE))
356 		goto out;
357 
358 	/*
359 	 * Coredumping runs without mmap_sem so we can only check that
360 	 * the mmap_sem is held, if PF_DUMPCORE was not set.
361 	 */
362 	WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
363 
364 	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
365 	if (!ctx)
366 		goto out;
367 
368 	BUG_ON(ctx->mm != mm);
369 
370 	VM_BUG_ON(reason & ~(VM_UFFD_MISSING|VM_UFFD_WP));
371 	VM_BUG_ON(!(reason & VM_UFFD_MISSING) ^ !!(reason & VM_UFFD_WP));
372 
373 	/*
374 	 * If it's already released don't get it. This avoids to loop
375 	 * in __get_user_pages if userfaultfd_release waits on the
376 	 * caller of handle_userfault to release the mmap_sem.
377 	 */
378 	if (unlikely(ACCESS_ONCE(ctx->released)))
379 		goto out;
380 
381 	/*
382 	 * Check that we can return VM_FAULT_RETRY.
383 	 *
384 	 * NOTE: it should become possible to return VM_FAULT_RETRY
385 	 * even if FAULT_FLAG_TRIED is set without leading to gup()
386 	 * -EBUSY failures, if the userfaultfd is to be extended for
387 	 * VM_UFFD_WP tracking and we intend to arm the userfault
388 	 * without first stopping userland access to the memory. For
389 	 * VM_UFFD_MISSING userfaults this is enough for now.
390 	 */
391 	if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
392 		/*
393 		 * Validate the invariant that nowait must allow retry
394 		 * to be sure not to return SIGBUS erroneously on
395 		 * nowait invocations.
396 		 */
397 		BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
398 #ifdef CONFIG_DEBUG_VM
399 		if (printk_ratelimit()) {
400 			printk(KERN_WARNING
401 			       "FAULT_FLAG_ALLOW_RETRY missing %x\n",
402 			       vmf->flags);
403 			dump_stack();
404 		}
405 #endif
406 		goto out;
407 	}
408 
409 	/*
410 	 * Handle nowait, not much to do other than tell it to retry
411 	 * and wait.
412 	 */
413 	ret = VM_FAULT_RETRY;
414 	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
415 		goto out;
416 
417 	/* take the reference before dropping the mmap_sem */
418 	userfaultfd_ctx_get(ctx);
419 
420 	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
421 	uwq.wq.private = current;
422 	uwq.msg = userfault_msg(vmf->address, vmf->flags, reason);
423 	uwq.ctx = ctx;
424 	uwq.waken = false;
425 
426 	return_to_userland =
427 		(vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
428 		(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
429 	blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
430 			 TASK_KILLABLE;
431 
432 	spin_lock(&ctx->fault_pending_wqh.lock);
433 	/*
434 	 * After the __add_wait_queue the uwq is visible to userland
435 	 * through poll/read().
436 	 */
437 	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
438 	/*
439 	 * The smp_mb() after __set_current_state prevents the reads
440 	 * following the spin_unlock to happen before the list_add in
441 	 * __add_wait_queue.
442 	 */
443 	set_current_state(blocking_state);
444 	spin_unlock(&ctx->fault_pending_wqh.lock);
445 
446 	if (!is_vm_hugetlb_page(vmf->vma))
447 		must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
448 						  reason);
449 	else
450 		must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
451 						       vmf->address,
452 						       vmf->flags, reason);
453 	up_read(&mm->mmap_sem);
454 
455 	if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&
456 		   (return_to_userland ? !signal_pending(current) :
457 		    !fatal_signal_pending(current)))) {
458 		wake_up_poll(&ctx->fd_wqh, POLLIN);
459 		schedule();
460 		ret |= VM_FAULT_MAJOR;
461 
462 		/*
463 		 * False wakeups can orginate even from rwsem before
464 		 * up_read() however userfaults will wait either for a
465 		 * targeted wakeup on the specific uwq waitqueue from
466 		 * wake_userfault() or for signals or for uffd
467 		 * release.
468 		 */
469 		while (!READ_ONCE(uwq.waken)) {
470 			/*
471 			 * This needs the full smp_store_mb()
472 			 * guarantee as the state write must be
473 			 * visible to other CPUs before reading
474 			 * uwq.waken from other CPUs.
475 			 */
476 			set_current_state(blocking_state);
477 			if (READ_ONCE(uwq.waken) ||
478 			    READ_ONCE(ctx->released) ||
479 			    (return_to_userland ? signal_pending(current) :
480 			     fatal_signal_pending(current)))
481 				break;
482 			schedule();
483 		}
484 	}
485 
486 	__set_current_state(TASK_RUNNING);
487 
488 	if (return_to_userland) {
489 		if (signal_pending(current) &&
490 		    !fatal_signal_pending(current)) {
491 			/*
492 			 * If we got a SIGSTOP or SIGCONT and this is
493 			 * a normal userland page fault, just let
494 			 * userland return so the signal will be
495 			 * handled and gdb debugging works.  The page
496 			 * fault code immediately after we return from
497 			 * this function is going to release the
498 			 * mmap_sem and it's not depending on it
499 			 * (unlike gup would if we were not to return
500 			 * VM_FAULT_RETRY).
501 			 *
502 			 * If a fatal signal is pending we still take
503 			 * the streamlined VM_FAULT_RETRY failure path
504 			 * and there's no need to retake the mmap_sem
505 			 * in such case.
506 			 */
507 			down_read(&mm->mmap_sem);
508 			ret = VM_FAULT_NOPAGE;
509 		}
510 	}
511 
512 	/*
513 	 * Here we race with the list_del; list_add in
514 	 * userfaultfd_ctx_read(), however because we don't ever run
515 	 * list_del_init() to refile across the two lists, the prev
516 	 * and next pointers will never point to self. list_add also
517 	 * would never let any of the two pointers to point to
518 	 * self. So list_empty_careful won't risk to see both pointers
519 	 * pointing to self at any time during the list refile. The
520 	 * only case where list_del_init() is called is the full
521 	 * removal in the wake function and there we don't re-list_add
522 	 * and it's fine not to block on the spinlock. The uwq on this
523 	 * kernel stack can be released after the list_del_init.
524 	 */
525 	if (!list_empty_careful(&uwq.wq.entry)) {
526 		spin_lock(&ctx->fault_pending_wqh.lock);
527 		/*
528 		 * No need of list_del_init(), the uwq on the stack
529 		 * will be freed shortly anyway.
530 		 */
531 		list_del(&uwq.wq.entry);
532 		spin_unlock(&ctx->fault_pending_wqh.lock);
533 	}
534 
535 	/*
536 	 * ctx may go away after this if the userfault pseudo fd is
537 	 * already released.
538 	 */
539 	userfaultfd_ctx_put(ctx);
540 
541 out:
542 	return ret;
543 }
544 
545 static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
546 					      struct userfaultfd_wait_queue *ewq)
547 {
548 	if (WARN_ON_ONCE(current->flags & PF_EXITING))
549 		goto out;
550 
551 	ewq->ctx = ctx;
552 	init_waitqueue_entry(&ewq->wq, current);
553 
554 	spin_lock(&ctx->event_wqh.lock);
555 	/*
556 	 * After the __add_wait_queue the uwq is visible to userland
557 	 * through poll/read().
558 	 */
559 	__add_wait_queue(&ctx->event_wqh, &ewq->wq);
560 	for (;;) {
561 		set_current_state(TASK_KILLABLE);
562 		if (ewq->msg.event == 0)
563 			break;
564 		if (ACCESS_ONCE(ctx->released) ||
565 		    fatal_signal_pending(current)) {
566 			__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
567 			if (ewq->msg.event == UFFD_EVENT_FORK) {
568 				struct userfaultfd_ctx *new;
569 
570 				new = (struct userfaultfd_ctx *)
571 					(unsigned long)
572 					ewq->msg.arg.reserved.reserved1;
573 
574 				userfaultfd_ctx_put(new);
575 			}
576 			break;
577 		}
578 
579 		spin_unlock(&ctx->event_wqh.lock);
580 
581 		wake_up_poll(&ctx->fd_wqh, POLLIN);
582 		schedule();
583 
584 		spin_lock(&ctx->event_wqh.lock);
585 	}
586 	__set_current_state(TASK_RUNNING);
587 	spin_unlock(&ctx->event_wqh.lock);
588 
589 	/*
590 	 * ctx may go away after this if the userfault pseudo fd is
591 	 * already released.
592 	 */
593 out:
594 	userfaultfd_ctx_put(ctx);
595 }
596 
597 static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
598 				       struct userfaultfd_wait_queue *ewq)
599 {
600 	ewq->msg.event = 0;
601 	wake_up_locked(&ctx->event_wqh);
602 	__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
603 }
604 
605 int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
606 {
607 	struct userfaultfd_ctx *ctx = NULL, *octx;
608 	struct userfaultfd_fork_ctx *fctx;
609 
610 	octx = vma->vm_userfaultfd_ctx.ctx;
611 	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
612 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
613 		vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
614 		return 0;
615 	}
616 
617 	list_for_each_entry(fctx, fcs, list)
618 		if (fctx->orig == octx) {
619 			ctx = fctx->new;
620 			break;
621 		}
622 
623 	if (!ctx) {
624 		fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
625 		if (!fctx)
626 			return -ENOMEM;
627 
628 		ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
629 		if (!ctx) {
630 			kfree(fctx);
631 			return -ENOMEM;
632 		}
633 
634 		atomic_set(&ctx->refcount, 1);
635 		ctx->flags = octx->flags;
636 		ctx->state = UFFD_STATE_RUNNING;
637 		ctx->features = octx->features;
638 		ctx->released = false;
639 		ctx->mm = vma->vm_mm;
640 		atomic_inc(&ctx->mm->mm_count);
641 
642 		userfaultfd_ctx_get(octx);
643 		fctx->orig = octx;
644 		fctx->new = ctx;
645 		list_add_tail(&fctx->list, fcs);
646 	}
647 
648 	vma->vm_userfaultfd_ctx.ctx = ctx;
649 	return 0;
650 }
651 
652 static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
653 {
654 	struct userfaultfd_ctx *ctx = fctx->orig;
655 	struct userfaultfd_wait_queue ewq;
656 
657 	msg_init(&ewq.msg);
658 
659 	ewq.msg.event = UFFD_EVENT_FORK;
660 	ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;
661 
662 	userfaultfd_event_wait_completion(ctx, &ewq);
663 }
664 
665 void dup_userfaultfd_complete(struct list_head *fcs)
666 {
667 	struct userfaultfd_fork_ctx *fctx, *n;
668 
669 	list_for_each_entry_safe(fctx, n, fcs, list) {
670 		dup_fctx(fctx);
671 		list_del(&fctx->list);
672 		kfree(fctx);
673 	}
674 }
675 
676 void mremap_userfaultfd_prep(struct vm_area_struct *vma,
677 			     struct vm_userfaultfd_ctx *vm_ctx)
678 {
679 	struct userfaultfd_ctx *ctx;
680 
681 	ctx = vma->vm_userfaultfd_ctx.ctx;
682 	if (ctx && (ctx->features & UFFD_FEATURE_EVENT_REMAP)) {
683 		vm_ctx->ctx = ctx;
684 		userfaultfd_ctx_get(ctx);
685 	}
686 }
687 
688 void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
689 				 unsigned long from, unsigned long to,
690 				 unsigned long len)
691 {
692 	struct userfaultfd_ctx *ctx = vm_ctx->ctx;
693 	struct userfaultfd_wait_queue ewq;
694 
695 	if (!ctx)
696 		return;
697 
698 	if (to & ~PAGE_MASK) {
699 		userfaultfd_ctx_put(ctx);
700 		return;
701 	}
702 
703 	msg_init(&ewq.msg);
704 
705 	ewq.msg.event = UFFD_EVENT_REMAP;
706 	ewq.msg.arg.remap.from = from;
707 	ewq.msg.arg.remap.to = to;
708 	ewq.msg.arg.remap.len = len;
709 
710 	userfaultfd_event_wait_completion(ctx, &ewq);
711 }
712 
713 bool userfaultfd_remove(struct vm_area_struct *vma,
714 			unsigned long start, unsigned long end)
715 {
716 	struct mm_struct *mm = vma->vm_mm;
717 	struct userfaultfd_ctx *ctx;
718 	struct userfaultfd_wait_queue ewq;
719 
720 	ctx = vma->vm_userfaultfd_ctx.ctx;
721 	if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
722 		return true;
723 
724 	userfaultfd_ctx_get(ctx);
725 	up_read(&mm->mmap_sem);
726 
727 	msg_init(&ewq.msg);
728 
729 	ewq.msg.event = UFFD_EVENT_REMOVE;
730 	ewq.msg.arg.remove.start = start;
731 	ewq.msg.arg.remove.end = end;
732 
733 	userfaultfd_event_wait_completion(ctx, &ewq);
734 
735 	return false;
736 }
737 
738 static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
739 			  unsigned long start, unsigned long end)
740 {
741 	struct userfaultfd_unmap_ctx *unmap_ctx;
742 
743 	list_for_each_entry(unmap_ctx, unmaps, list)
744 		if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
745 		    unmap_ctx->end == end)
746 			return true;
747 
748 	return false;
749 }
750 
751 int userfaultfd_unmap_prep(struct vm_area_struct *vma,
752 			   unsigned long start, unsigned long end,
753 			   struct list_head *unmaps)
754 {
755 	for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
756 		struct userfaultfd_unmap_ctx *unmap_ctx;
757 		struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
758 
759 		if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
760 		    has_unmap_ctx(ctx, unmaps, start, end))
761 			continue;
762 
763 		unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
764 		if (!unmap_ctx)
765 			return -ENOMEM;
766 
767 		userfaultfd_ctx_get(ctx);
768 		unmap_ctx->ctx = ctx;
769 		unmap_ctx->start = start;
770 		unmap_ctx->end = end;
771 		list_add_tail(&unmap_ctx->list, unmaps);
772 	}
773 
774 	return 0;
775 }
776 
777 void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
778 {
779 	struct userfaultfd_unmap_ctx *ctx, *n;
780 	struct userfaultfd_wait_queue ewq;
781 
782 	list_for_each_entry_safe(ctx, n, uf, list) {
783 		msg_init(&ewq.msg);
784 
785 		ewq.msg.event = UFFD_EVENT_UNMAP;
786 		ewq.msg.arg.remove.start = ctx->start;
787 		ewq.msg.arg.remove.end = ctx->end;
788 
789 		userfaultfd_event_wait_completion(ctx->ctx, &ewq);
790 
791 		list_del(&ctx->list);
792 		kfree(ctx);
793 	}
794 }
795 
796 static int userfaultfd_release(struct inode *inode, struct file *file)
797 {
798 	struct userfaultfd_ctx *ctx = file->private_data;
799 	struct mm_struct *mm = ctx->mm;
800 	struct vm_area_struct *vma, *prev;
801 	/* len == 0 means wake all */
802 	struct userfaultfd_wake_range range = { .len = 0, };
803 	unsigned long new_flags;
804 
805 	ACCESS_ONCE(ctx->released) = true;
806 
807 	if (!mmget_not_zero(mm))
808 		goto wakeup;
809 
810 	/*
811 	 * Flush page faults out of all CPUs. NOTE: all page faults
812 	 * must be retried without returning VM_FAULT_SIGBUS if
813 	 * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx
814 	 * changes while handle_userfault released the mmap_sem. So
815 	 * it's critical that released is set to true (above), before
816 	 * taking the mmap_sem for writing.
817 	 */
818 	down_write(&mm->mmap_sem);
819 	prev = NULL;
820 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
821 		cond_resched();
822 		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
823 		       !!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
824 		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
825 			prev = vma;
826 			continue;
827 		}
828 		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
829 		prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
830 				 new_flags, vma->anon_vma,
831 				 vma->vm_file, vma->vm_pgoff,
832 				 vma_policy(vma),
833 				 NULL_VM_UFFD_CTX);
834 		if (prev)
835 			vma = prev;
836 		else
837 			prev = vma;
838 		vma->vm_flags = new_flags;
839 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
840 	}
841 	up_write(&mm->mmap_sem);
842 	mmput(mm);
843 wakeup:
844 	/*
845 	 * After no new page faults can wait on this fault_*wqh, flush
846 	 * the last page faults that may have been already waiting on
847 	 * the fault_*wqh.
848 	 */
849 	spin_lock(&ctx->fault_pending_wqh.lock);
850 	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
851 	__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range);
852 	spin_unlock(&ctx->fault_pending_wqh.lock);
853 
854 	/* Flush pending events that may still wait on event_wqh */
855 	wake_up_all(&ctx->event_wqh);
856 
857 	wake_up_poll(&ctx->fd_wqh, POLLHUP);
858 	userfaultfd_ctx_put(ctx);
859 	return 0;
860 }
861 
862 /* fault_pending_wqh.lock must be hold by the caller */
863 static inline struct userfaultfd_wait_queue *find_userfault_in(
864 		wait_queue_head_t *wqh)
865 {
866 	wait_queue_entry_t *wq;
867 	struct userfaultfd_wait_queue *uwq;
868 
869 	VM_BUG_ON(!spin_is_locked(&wqh->lock));
870 
871 	uwq = NULL;
872 	if (!waitqueue_active(wqh))
873 		goto out;
874 	/* walk in reverse to provide FIFO behavior to read userfaults */
875 	wq = list_last_entry(&wqh->head, typeof(*wq), entry);
876 	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
877 out:
878 	return uwq;
879 }
880 
881 static inline struct userfaultfd_wait_queue *find_userfault(
882 		struct userfaultfd_ctx *ctx)
883 {
884 	return find_userfault_in(&ctx->fault_pending_wqh);
885 }
886 
887 static inline struct userfaultfd_wait_queue *find_userfault_evt(
888 		struct userfaultfd_ctx *ctx)
889 {
890 	return find_userfault_in(&ctx->event_wqh);
891 }
892 
893 static unsigned int userfaultfd_poll(struct file *file, poll_table *wait)
894 {
895 	struct userfaultfd_ctx *ctx = file->private_data;
896 	unsigned int ret;
897 
898 	poll_wait(file, &ctx->fd_wqh, wait);
899 
900 	switch (ctx->state) {
901 	case UFFD_STATE_WAIT_API:
902 		return POLLERR;
903 	case UFFD_STATE_RUNNING:
904 		/*
905 		 * poll() never guarantees that read won't block.
906 		 * userfaults can be waken before they're read().
907 		 */
908 		if (unlikely(!(file->f_flags & O_NONBLOCK)))
909 			return POLLERR;
910 		/*
911 		 * lockless access to see if there are pending faults
912 		 * __pollwait last action is the add_wait_queue but
913 		 * the spin_unlock would allow the waitqueue_active to
914 		 * pass above the actual list_add inside
915 		 * add_wait_queue critical section. So use a full
916 		 * memory barrier to serialize the list_add write of
917 		 * add_wait_queue() with the waitqueue_active read
918 		 * below.
919 		 */
920 		ret = 0;
921 		smp_mb();
922 		if (waitqueue_active(&ctx->fault_pending_wqh))
923 			ret = POLLIN;
924 		else if (waitqueue_active(&ctx->event_wqh))
925 			ret = POLLIN;
926 
927 		return ret;
928 	default:
929 		WARN_ON_ONCE(1);
930 		return POLLERR;
931 	}
932 }
933 
934 static const struct file_operations userfaultfd_fops;
935 
936 static int resolve_userfault_fork(struct userfaultfd_ctx *ctx,
937 				  struct userfaultfd_ctx *new,
938 				  struct uffd_msg *msg)
939 {
940 	int fd;
941 	struct file *file;
942 	unsigned int flags = new->flags & UFFD_SHARED_FCNTL_FLAGS;
943 
944 	fd = get_unused_fd_flags(flags);
945 	if (fd < 0)
946 		return fd;
947 
948 	file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, new,
949 				  O_RDWR | flags);
950 	if (IS_ERR(file)) {
951 		put_unused_fd(fd);
952 		return PTR_ERR(file);
953 	}
954 
955 	fd_install(fd, file);
956 	msg->arg.reserved.reserved1 = 0;
957 	msg->arg.fork.ufd = fd;
958 
959 	return 0;
960 }
961 
962 static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
963 				    struct uffd_msg *msg)
964 {
965 	ssize_t ret;
966 	DECLARE_WAITQUEUE(wait, current);
967 	struct userfaultfd_wait_queue *uwq;
968 	/*
969 	 * Handling fork event requires sleeping operations, so
970 	 * we drop the event_wqh lock, then do these ops, then
971 	 * lock it back and wake up the waiter. While the lock is
972 	 * dropped the ewq may go away so we keep track of it
973 	 * carefully.
974 	 */
975 	LIST_HEAD(fork_event);
976 	struct userfaultfd_ctx *fork_nctx = NULL;
977 
978 	/* always take the fd_wqh lock before the fault_pending_wqh lock */
979 	spin_lock(&ctx->fd_wqh.lock);
980 	__add_wait_queue(&ctx->fd_wqh, &wait);
981 	for (;;) {
982 		set_current_state(TASK_INTERRUPTIBLE);
983 		spin_lock(&ctx->fault_pending_wqh.lock);
984 		uwq = find_userfault(ctx);
985 		if (uwq) {
986 			/*
987 			 * Use a seqcount to repeat the lockless check
988 			 * in wake_userfault() to avoid missing
989 			 * wakeups because during the refile both
990 			 * waitqueue could become empty if this is the
991 			 * only userfault.
992 			 */
993 			write_seqcount_begin(&ctx->refile_seq);
994 
995 			/*
996 			 * The fault_pending_wqh.lock prevents the uwq
997 			 * to disappear from under us.
998 			 *
999 			 * Refile this userfault from
1000 			 * fault_pending_wqh to fault_wqh, it's not
1001 			 * pending anymore after we read it.
1002 			 *
1003 			 * Use list_del() by hand (as
1004 			 * userfaultfd_wake_function also uses
1005 			 * list_del_init() by hand) to be sure nobody
1006 			 * changes __remove_wait_queue() to use
1007 			 * list_del_init() in turn breaking the
1008 			 * !list_empty_careful() check in
1009 			 * handle_userfault(). The uwq->wq.head list
1010 			 * must never be empty at any time during the
1011 			 * refile, or the waitqueue could disappear
1012 			 * from under us. The "wait_queue_head_t"
1013 			 * parameter of __remove_wait_queue() is unused
1014 			 * anyway.
1015 			 */
1016 			list_del(&uwq->wq.entry);
1017 			__add_wait_queue(&ctx->fault_wqh, &uwq->wq);
1018 
1019 			write_seqcount_end(&ctx->refile_seq);
1020 
1021 			/* careful to always initialize msg if ret == 0 */
1022 			*msg = uwq->msg;
1023 			spin_unlock(&ctx->fault_pending_wqh.lock);
1024 			ret = 0;
1025 			break;
1026 		}
1027 		spin_unlock(&ctx->fault_pending_wqh.lock);
1028 
1029 		spin_lock(&ctx->event_wqh.lock);
1030 		uwq = find_userfault_evt(ctx);
1031 		if (uwq) {
1032 			*msg = uwq->msg;
1033 
1034 			if (uwq->msg.event == UFFD_EVENT_FORK) {
1035 				fork_nctx = (struct userfaultfd_ctx *)
1036 					(unsigned long)
1037 					uwq->msg.arg.reserved.reserved1;
1038 				list_move(&uwq->wq.entry, &fork_event);
1039 				spin_unlock(&ctx->event_wqh.lock);
1040 				ret = 0;
1041 				break;
1042 			}
1043 
1044 			userfaultfd_event_complete(ctx, uwq);
1045 			spin_unlock(&ctx->event_wqh.lock);
1046 			ret = 0;
1047 			break;
1048 		}
1049 		spin_unlock(&ctx->event_wqh.lock);
1050 
1051 		if (signal_pending(current)) {
1052 			ret = -ERESTARTSYS;
1053 			break;
1054 		}
1055 		if (no_wait) {
1056 			ret = -EAGAIN;
1057 			break;
1058 		}
1059 		spin_unlock(&ctx->fd_wqh.lock);
1060 		schedule();
1061 		spin_lock(&ctx->fd_wqh.lock);
1062 	}
1063 	__remove_wait_queue(&ctx->fd_wqh, &wait);
1064 	__set_current_state(TASK_RUNNING);
1065 	spin_unlock(&ctx->fd_wqh.lock);
1066 
1067 	if (!ret && msg->event == UFFD_EVENT_FORK) {
1068 		ret = resolve_userfault_fork(ctx, fork_nctx, msg);
1069 
1070 		if (!ret) {
1071 			spin_lock(&ctx->event_wqh.lock);
1072 			if (!list_empty(&fork_event)) {
1073 				uwq = list_first_entry(&fork_event,
1074 						       typeof(*uwq),
1075 						       wq.entry);
1076 				list_del(&uwq->wq.entry);
1077 				__add_wait_queue(&ctx->event_wqh, &uwq->wq);
1078 				userfaultfd_event_complete(ctx, uwq);
1079 			}
1080 			spin_unlock(&ctx->event_wqh.lock);
1081 		}
1082 	}
1083 
1084 	return ret;
1085 }
1086 
1087 static ssize_t userfaultfd_read(struct file *file, char __user *buf,
1088 				size_t count, loff_t *ppos)
1089 {
1090 	struct userfaultfd_ctx *ctx = file->private_data;
1091 	ssize_t _ret, ret = 0;
1092 	struct uffd_msg msg;
1093 	int no_wait = file->f_flags & O_NONBLOCK;
1094 
1095 	if (ctx->state == UFFD_STATE_WAIT_API)
1096 		return -EINVAL;
1097 
1098 	for (;;) {
1099 		if (count < sizeof(msg))
1100 			return ret ? ret : -EINVAL;
1101 		_ret = userfaultfd_ctx_read(ctx, no_wait, &msg);
1102 		if (_ret < 0)
1103 			return ret ? ret : _ret;
1104 		if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
1105 			return ret ? ret : -EFAULT;
1106 		ret += sizeof(msg);
1107 		buf += sizeof(msg);
1108 		count -= sizeof(msg);
1109 		/*
1110 		 * Allow to read more than one fault at time but only
1111 		 * block if waiting for the very first one.
1112 		 */
1113 		no_wait = O_NONBLOCK;
1114 	}
1115 }
1116 
1117 static void __wake_userfault(struct userfaultfd_ctx *ctx,
1118 			     struct userfaultfd_wake_range *range)
1119 {
1120 	spin_lock(&ctx->fault_pending_wqh.lock);
1121 	/* wake all in the range and autoremove */
1122 	if (waitqueue_active(&ctx->fault_pending_wqh))
1123 		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
1124 				     range);
1125 	if (waitqueue_active(&ctx->fault_wqh))
1126 		__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, range);
1127 	spin_unlock(&ctx->fault_pending_wqh.lock);
1128 }
1129 
1130 static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
1131 					   struct userfaultfd_wake_range *range)
1132 {
1133 	unsigned seq;
1134 	bool need_wakeup;
1135 
1136 	/*
1137 	 * To be sure waitqueue_active() is not reordered by the CPU
1138 	 * before the pagetable update, use an explicit SMP memory
1139 	 * barrier here. PT lock release or up_read(mmap_sem) still
1140 	 * have release semantics that can allow the
1141 	 * waitqueue_active() to be reordered before the pte update.
1142 	 */
1143 	smp_mb();
1144 
1145 	/*
1146 	 * Use waitqueue_active because it's very frequent to
1147 	 * change the address space atomically even if there are no
1148 	 * userfaults yet. So we take the spinlock only when we're
1149 	 * sure we've userfaults to wake.
1150 	 */
1151 	do {
1152 		seq = read_seqcount_begin(&ctx->refile_seq);
1153 		need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
1154 			waitqueue_active(&ctx->fault_wqh);
1155 		cond_resched();
1156 	} while (read_seqcount_retry(&ctx->refile_seq, seq));
1157 	if (need_wakeup)
1158 		__wake_userfault(ctx, range);
1159 }
1160 
1161 static __always_inline int validate_range(struct mm_struct *mm,
1162 					  __u64 start, __u64 len)
1163 {
1164 	__u64 task_size = mm->task_size;
1165 
1166 	if (start & ~PAGE_MASK)
1167 		return -EINVAL;
1168 	if (len & ~PAGE_MASK)
1169 		return -EINVAL;
1170 	if (!len)
1171 		return -EINVAL;
1172 	if (start < mmap_min_addr)
1173 		return -EINVAL;
1174 	if (start >= task_size)
1175 		return -EINVAL;
1176 	if (len > task_size - start)
1177 		return -EINVAL;
1178 	return 0;
1179 }
1180 
1181 static inline bool vma_can_userfault(struct vm_area_struct *vma)
1182 {
1183 	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
1184 		vma_is_shmem(vma);
1185 }
1186 
1187 static int userfaultfd_register(struct userfaultfd_ctx *ctx,
1188 				unsigned long arg)
1189 {
1190 	struct mm_struct *mm = ctx->mm;
1191 	struct vm_area_struct *vma, *prev, *cur;
1192 	int ret;
1193 	struct uffdio_register uffdio_register;
1194 	struct uffdio_register __user *user_uffdio_register;
1195 	unsigned long vm_flags, new_flags;
1196 	bool found;
1197 	bool non_anon_pages;
1198 	unsigned long start, end, vma_end;
1199 
1200 	user_uffdio_register = (struct uffdio_register __user *) arg;
1201 
1202 	ret = -EFAULT;
1203 	if (copy_from_user(&uffdio_register, user_uffdio_register,
1204 			   sizeof(uffdio_register)-sizeof(__u64)))
1205 		goto out;
1206 
1207 	ret = -EINVAL;
1208 	if (!uffdio_register.mode)
1209 		goto out;
1210 	if (uffdio_register.mode & ~(UFFDIO_REGISTER_MODE_MISSING|
1211 				     UFFDIO_REGISTER_MODE_WP))
1212 		goto out;
1213 	vm_flags = 0;
1214 	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
1215 		vm_flags |= VM_UFFD_MISSING;
1216 	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
1217 		vm_flags |= VM_UFFD_WP;
1218 		/*
1219 		 * FIXME: remove the below error constraint by
1220 		 * implementing the wprotect tracking mode.
1221 		 */
1222 		ret = -EINVAL;
1223 		goto out;
1224 	}
1225 
1226 	ret = validate_range(mm, uffdio_register.range.start,
1227 			     uffdio_register.range.len);
1228 	if (ret)
1229 		goto out;
1230 
1231 	start = uffdio_register.range.start;
1232 	end = start + uffdio_register.range.len;
1233 
1234 	ret = -ENOMEM;
1235 	if (!mmget_not_zero(mm))
1236 		goto out;
1237 
1238 	down_write(&mm->mmap_sem);
1239 	vma = find_vma_prev(mm, start, &prev);
1240 	if (!vma)
1241 		goto out_unlock;
1242 
1243 	/* check that there's at least one vma in the range */
1244 	ret = -EINVAL;
1245 	if (vma->vm_start >= end)
1246 		goto out_unlock;
1247 
1248 	/*
1249 	 * If the first vma contains huge pages, make sure start address
1250 	 * is aligned to huge page size.
1251 	 */
1252 	if (is_vm_hugetlb_page(vma)) {
1253 		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
1254 
1255 		if (start & (vma_hpagesize - 1))
1256 			goto out_unlock;
1257 	}
1258 
1259 	/*
1260 	 * Search for not compatible vmas.
1261 	 */
1262 	found = false;
1263 	non_anon_pages = false;
1264 	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
1265 		cond_resched();
1266 
1267 		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
1268 		       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
1269 
1270 		/* check not compatible vmas */
1271 		ret = -EINVAL;
1272 		if (!vma_can_userfault(cur))
1273 			goto out_unlock;
1274 		/*
1275 		 * If this vma contains ending address, and huge pages
1276 		 * check alignment.
1277 		 */
1278 		if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
1279 		    end > cur->vm_start) {
1280 			unsigned long vma_hpagesize = vma_kernel_pagesize(cur);
1281 
1282 			ret = -EINVAL;
1283 
1284 			if (end & (vma_hpagesize - 1))
1285 				goto out_unlock;
1286 		}
1287 
1288 		/*
1289 		 * Check that this vma isn't already owned by a
1290 		 * different userfaultfd. We can't allow more than one
1291 		 * userfaultfd to own a single vma simultaneously or we
1292 		 * wouldn't know which one to deliver the userfaults to.
1293 		 */
1294 		ret = -EBUSY;
1295 		if (cur->vm_userfaultfd_ctx.ctx &&
1296 		    cur->vm_userfaultfd_ctx.ctx != ctx)
1297 			goto out_unlock;
1298 
1299 		/*
1300 		 * Note vmas containing huge pages
1301 		 */
1302 		if (is_vm_hugetlb_page(cur) || vma_is_shmem(cur))
1303 			non_anon_pages = true;
1304 
1305 		found = true;
1306 	}
1307 	BUG_ON(!found);
1308 
1309 	if (vma->vm_start < start)
1310 		prev = vma;
1311 
1312 	ret = 0;
1313 	do {
1314 		cond_resched();
1315 
1316 		BUG_ON(!vma_can_userfault(vma));
1317 		BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
1318 		       vma->vm_userfaultfd_ctx.ctx != ctx);
1319 
1320 		/*
1321 		 * Nothing to do: this vma is already registered into this
1322 		 * userfaultfd and with the right tracking mode too.
1323 		 */
1324 		if (vma->vm_userfaultfd_ctx.ctx == ctx &&
1325 		    (vma->vm_flags & vm_flags) == vm_flags)
1326 			goto skip;
1327 
1328 		if (vma->vm_start > start)
1329 			start = vma->vm_start;
1330 		vma_end = min(end, vma->vm_end);
1331 
1332 		new_flags = (vma->vm_flags & ~vm_flags) | vm_flags;
1333 		prev = vma_merge(mm, prev, start, vma_end, new_flags,
1334 				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
1335 				 vma_policy(vma),
1336 				 ((struct vm_userfaultfd_ctx){ ctx }));
1337 		if (prev) {
1338 			vma = prev;
1339 			goto next;
1340 		}
1341 		if (vma->vm_start < start) {
1342 			ret = split_vma(mm, vma, start, 1);
1343 			if (ret)
1344 				break;
1345 		}
1346 		if (vma->vm_end > end) {
1347 			ret = split_vma(mm, vma, end, 0);
1348 			if (ret)
1349 				break;
1350 		}
1351 	next:
1352 		/*
1353 		 * In the vma_merge() successful mprotect-like case 8:
1354 		 * the next vma was merged into the current one and
1355 		 * the current one has not been updated yet.
1356 		 */
1357 		vma->vm_flags = new_flags;
1358 		vma->vm_userfaultfd_ctx.ctx = ctx;
1359 
1360 	skip:
1361 		prev = vma;
1362 		start = vma->vm_end;
1363 		vma = vma->vm_next;
1364 	} while (vma && vma->vm_start < end);
1365 out_unlock:
1366 	up_write(&mm->mmap_sem);
1367 	mmput(mm);
1368 	if (!ret) {
1369 		/*
1370 		 * Now that we scanned all vmas we can already tell
1371 		 * userland which ioctls methods are guaranteed to
1372 		 * succeed on this range.
1373 		 */
1374 		if (put_user(non_anon_pages ? UFFD_API_RANGE_IOCTLS_BASIC :
1375 			     UFFD_API_RANGE_IOCTLS,
1376 			     &user_uffdio_register->ioctls))
1377 			ret = -EFAULT;
1378 	}
1379 out:
1380 	return ret;
1381 }
1382 
1383 static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
1384 				  unsigned long arg)
1385 {
1386 	struct mm_struct *mm = ctx->mm;
1387 	struct vm_area_struct *vma, *prev, *cur;
1388 	int ret;
1389 	struct uffdio_range uffdio_unregister;
1390 	unsigned long new_flags;
1391 	bool found;
1392 	unsigned long start, end, vma_end;
1393 	const void __user *buf = (void __user *)arg;
1394 
1395 	ret = -EFAULT;
1396 	if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
1397 		goto out;
1398 
1399 	ret = validate_range(mm, uffdio_unregister.start,
1400 			     uffdio_unregister.len);
1401 	if (ret)
1402 		goto out;
1403 
1404 	start = uffdio_unregister.start;
1405 	end = start + uffdio_unregister.len;
1406 
1407 	ret = -ENOMEM;
1408 	if (!mmget_not_zero(mm))
1409 		goto out;
1410 
1411 	down_write(&mm->mmap_sem);
1412 	vma = find_vma_prev(mm, start, &prev);
1413 	if (!vma)
1414 		goto out_unlock;
1415 
1416 	/* check that there's at least one vma in the range */
1417 	ret = -EINVAL;
1418 	if (vma->vm_start >= end)
1419 		goto out_unlock;
1420 
1421 	/*
1422 	 * If the first vma contains huge pages, make sure start address
1423 	 * is aligned to huge page size.
1424 	 */
1425 	if (is_vm_hugetlb_page(vma)) {
1426 		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
1427 
1428 		if (start & (vma_hpagesize - 1))
1429 			goto out_unlock;
1430 	}
1431 
1432 	/*
1433 	 * Search for not compatible vmas.
1434 	 */
1435 	found = false;
1436 	ret = -EINVAL;
1437 	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
1438 		cond_resched();
1439 
1440 		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
1441 		       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
1442 
1443 		/*
1444 		 * Check not compatible vmas, not strictly required
1445 		 * here as not compatible vmas cannot have an
1446 		 * userfaultfd_ctx registered on them, but this
1447 		 * provides for more strict behavior to notice
1448 		 * unregistration errors.
1449 		 */
1450 		if (!vma_can_userfault(cur))
1451 			goto out_unlock;
1452 
1453 		found = true;
1454 	}
1455 	BUG_ON(!found);
1456 
1457 	if (vma->vm_start < start)
1458 		prev = vma;
1459 
1460 	ret = 0;
1461 	do {
1462 		cond_resched();
1463 
1464 		BUG_ON(!vma_can_userfault(vma));
1465 
1466 		/*
1467 		 * Nothing to do: this vma is already registered into this
1468 		 * userfaultfd and with the right tracking mode too.
1469 		 */
1470 		if (!vma->vm_userfaultfd_ctx.ctx)
1471 			goto skip;
1472 
1473 		if (vma->vm_start > start)
1474 			start = vma->vm_start;
1475 		vma_end = min(end, vma->vm_end);
1476 
1477 		if (userfaultfd_missing(vma)) {
1478 			/*
1479 			 * Wake any concurrent pending userfault while
1480 			 * we unregister, so they will not hang
1481 			 * permanently and it avoids userland to call
1482 			 * UFFDIO_WAKE explicitly.
1483 			 */
1484 			struct userfaultfd_wake_range range;
1485 			range.start = start;
1486 			range.len = vma_end - start;
1487 			wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
1488 		}
1489 
1490 		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
1491 		prev = vma_merge(mm, prev, start, vma_end, new_flags,
1492 				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
1493 				 vma_policy(vma),
1494 				 NULL_VM_UFFD_CTX);
1495 		if (prev) {
1496 			vma = prev;
1497 			goto next;
1498 		}
1499 		if (vma->vm_start < start) {
1500 			ret = split_vma(mm, vma, start, 1);
1501 			if (ret)
1502 				break;
1503 		}
1504 		if (vma->vm_end > end) {
1505 			ret = split_vma(mm, vma, end, 0);
1506 			if (ret)
1507 				break;
1508 		}
1509 	next:
1510 		/*
1511 		 * In the vma_merge() successful mprotect-like case 8:
1512 		 * the next vma was merged into the current one and
1513 		 * the current one has not been updated yet.
1514 		 */
1515 		vma->vm_flags = new_flags;
1516 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
1517 
1518 	skip:
1519 		prev = vma;
1520 		start = vma->vm_end;
1521 		vma = vma->vm_next;
1522 	} while (vma && vma->vm_start < end);
1523 out_unlock:
1524 	up_write(&mm->mmap_sem);
1525 	mmput(mm);
1526 out:
1527 	return ret;
1528 }
1529 
1530 /*
1531  * userfaultfd_wake may be used in combination with the
1532  * UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches.
1533  */
1534 static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
1535 			    unsigned long arg)
1536 {
1537 	int ret;
1538 	struct uffdio_range uffdio_wake;
1539 	struct userfaultfd_wake_range range;
1540 	const void __user *buf = (void __user *)arg;
1541 
1542 	ret = -EFAULT;
1543 	if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
1544 		goto out;
1545 
1546 	ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
1547 	if (ret)
1548 		goto out;
1549 
1550 	range.start = uffdio_wake.start;
1551 	range.len = uffdio_wake.len;
1552 
1553 	/*
1554 	 * len == 0 means wake all and we don't want to wake all here,
1555 	 * so check it again to be sure.
1556 	 */
1557 	VM_BUG_ON(!range.len);
1558 
1559 	wake_userfault(ctx, &range);
1560 	ret = 0;
1561 
1562 out:
1563 	return ret;
1564 }
1565 
1566 static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
1567 			    unsigned long arg)
1568 {
1569 	__s64 ret;
1570 	struct uffdio_copy uffdio_copy;
1571 	struct uffdio_copy __user *user_uffdio_copy;
1572 	struct userfaultfd_wake_range range;
1573 
1574 	user_uffdio_copy = (struct uffdio_copy __user *) arg;
1575 
1576 	ret = -EFAULT;
1577 	if (copy_from_user(&uffdio_copy, user_uffdio_copy,
1578 			   /* don't copy "copy" last field */
1579 			   sizeof(uffdio_copy)-sizeof(__s64)))
1580 		goto out;
1581 
1582 	ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
1583 	if (ret)
1584 		goto out;
1585 	/*
1586 	 * double check for wraparound just in case. copy_from_user()
1587 	 * will later check uffdio_copy.src + uffdio_copy.len to fit
1588 	 * in the userland range.
1589 	 */
1590 	ret = -EINVAL;
1591 	if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
1592 		goto out;
1593 	if (uffdio_copy.mode & ~UFFDIO_COPY_MODE_DONTWAKE)
1594 		goto out;
1595 	if (mmget_not_zero(ctx->mm)) {
1596 		ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
1597 				   uffdio_copy.len);
1598 		mmput(ctx->mm);
1599 	} else {
1600 		return -ESRCH;
1601 	}
1602 	if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
1603 		return -EFAULT;
1604 	if (ret < 0)
1605 		goto out;
1606 	BUG_ON(!ret);
1607 	/* len == 0 would wake all */
1608 	range.len = ret;
1609 	if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
1610 		range.start = uffdio_copy.dst;
1611 		wake_userfault(ctx, &range);
1612 	}
1613 	ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
1614 out:
1615 	return ret;
1616 }
1617 
1618 static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
1619 				unsigned long arg)
1620 {
1621 	__s64 ret;
1622 	struct uffdio_zeropage uffdio_zeropage;
1623 	struct uffdio_zeropage __user *user_uffdio_zeropage;
1624 	struct userfaultfd_wake_range range;
1625 
1626 	user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
1627 
1628 	ret = -EFAULT;
1629 	if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
1630 			   /* don't copy "zeropage" last field */
1631 			   sizeof(uffdio_zeropage)-sizeof(__s64)))
1632 		goto out;
1633 
1634 	ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
1635 			     uffdio_zeropage.range.len);
1636 	if (ret)
1637 		goto out;
1638 	ret = -EINVAL;
1639 	if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
1640 		goto out;
1641 
1642 	if (mmget_not_zero(ctx->mm)) {
1643 		ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
1644 				     uffdio_zeropage.range.len);
1645 		mmput(ctx->mm);
1646 	} else {
1647 		return -ESRCH;
1648 	}
1649 	if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
1650 		return -EFAULT;
1651 	if (ret < 0)
1652 		goto out;
1653 	/* len == 0 would wake all */
1654 	BUG_ON(!ret);
1655 	range.len = ret;
1656 	if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
1657 		range.start = uffdio_zeropage.range.start;
1658 		wake_userfault(ctx, &range);
1659 	}
1660 	ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
1661 out:
1662 	return ret;
1663 }
1664 
1665 static inline unsigned int uffd_ctx_features(__u64 user_features)
1666 {
1667 	/*
1668 	 * For the current set of features the bits just coincide
1669 	 */
1670 	return (unsigned int)user_features;
1671 }
1672 
1673 /*
1674  * userland asks for a certain API version and we return which bits
1675  * and ioctl commands are implemented in this kernel for such API
1676  * version or -EINVAL if unknown.
1677  */
1678 static int userfaultfd_api(struct userfaultfd_ctx *ctx,
1679 			   unsigned long arg)
1680 {
1681 	struct uffdio_api uffdio_api;
1682 	void __user *buf = (void __user *)arg;
1683 	int ret;
1684 	__u64 features;
1685 
1686 	ret = -EINVAL;
1687 	if (ctx->state != UFFD_STATE_WAIT_API)
1688 		goto out;
1689 	ret = -EFAULT;
1690 	if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
1691 		goto out;
1692 	features = uffdio_api.features;
1693 	if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) {
1694 		memset(&uffdio_api, 0, sizeof(uffdio_api));
1695 		if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
1696 			goto out;
1697 		ret = -EINVAL;
1698 		goto out;
1699 	}
1700 	/* report all available features and ioctls to userland */
1701 	uffdio_api.features = UFFD_API_FEATURES;
1702 	uffdio_api.ioctls = UFFD_API_IOCTLS;
1703 	ret = -EFAULT;
1704 	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
1705 		goto out;
1706 	ctx->state = UFFD_STATE_RUNNING;
1707 	/* only enable the requested features for this uffd context */
1708 	ctx->features = uffd_ctx_features(features);
1709 	ret = 0;
1710 out:
1711 	return ret;
1712 }
1713 
1714 static long userfaultfd_ioctl(struct file *file, unsigned cmd,
1715 			      unsigned long arg)
1716 {
1717 	int ret = -EINVAL;
1718 	struct userfaultfd_ctx *ctx = file->private_data;
1719 
1720 	if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API)
1721 		return -EINVAL;
1722 
1723 	switch(cmd) {
1724 	case UFFDIO_API:
1725 		ret = userfaultfd_api(ctx, arg);
1726 		break;
1727 	case UFFDIO_REGISTER:
1728 		ret = userfaultfd_register(ctx, arg);
1729 		break;
1730 	case UFFDIO_UNREGISTER:
1731 		ret = userfaultfd_unregister(ctx, arg);
1732 		break;
1733 	case UFFDIO_WAKE:
1734 		ret = userfaultfd_wake(ctx, arg);
1735 		break;
1736 	case UFFDIO_COPY:
1737 		ret = userfaultfd_copy(ctx, arg);
1738 		break;
1739 	case UFFDIO_ZEROPAGE:
1740 		ret = userfaultfd_zeropage(ctx, arg);
1741 		break;
1742 	}
1743 	return ret;
1744 }
1745 
1746 #ifdef CONFIG_PROC_FS
1747 static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
1748 {
1749 	struct userfaultfd_ctx *ctx = f->private_data;
1750 	wait_queue_entry_t *wq;
1751 	struct userfaultfd_wait_queue *uwq;
1752 	unsigned long pending = 0, total = 0;
1753 
1754 	spin_lock(&ctx->fault_pending_wqh.lock);
1755 	list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
1756 		uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
1757 		pending++;
1758 		total++;
1759 	}
1760 	list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
1761 		uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
1762 		total++;
1763 	}
1764 	spin_unlock(&ctx->fault_pending_wqh.lock);
1765 
1766 	/*
1767 	 * If more protocols will be added, there will be all shown
1768 	 * separated by a space. Like this:
1769 	 *	protocols: aa:... bb:...
1770 	 */
1771 	seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
1772 		   pending, total, UFFD_API, ctx->features,
1773 		   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
1774 }
1775 #endif
1776 
1777 static const struct file_operations userfaultfd_fops = {
1778 #ifdef CONFIG_PROC_FS
1779 	.show_fdinfo	= userfaultfd_show_fdinfo,
1780 #endif
1781 	.release	= userfaultfd_release,
1782 	.poll		= userfaultfd_poll,
1783 	.read		= userfaultfd_read,
1784 	.unlocked_ioctl = userfaultfd_ioctl,
1785 	.compat_ioctl	= userfaultfd_ioctl,
1786 	.llseek		= noop_llseek,
1787 };
1788 
1789 static void init_once_userfaultfd_ctx(void *mem)
1790 {
1791 	struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;
1792 
1793 	init_waitqueue_head(&ctx->fault_pending_wqh);
1794 	init_waitqueue_head(&ctx->fault_wqh);
1795 	init_waitqueue_head(&ctx->event_wqh);
1796 	init_waitqueue_head(&ctx->fd_wqh);
1797 	seqcount_init(&ctx->refile_seq);
1798 }
1799 
1800 /**
1801  * userfaultfd_file_create - Creates a userfaultfd file pointer.
1802  * @flags: Flags for the userfaultfd file.
1803  *
1804  * This function creates a userfaultfd file pointer, w/out installing
1805  * it into the fd table. This is useful when the userfaultfd file is
1806  * used during the initialization of data structures that require
1807  * extra setup after the userfaultfd creation. So the userfaultfd
1808  * creation is split into the file pointer creation phase, and the
1809  * file descriptor installation phase.  In this way races with
1810  * userspace closing the newly installed file descriptor can be
1811  * avoided.  Returns a userfaultfd file pointer, or a proper error
1812  * pointer.
1813  */
1814 static struct file *userfaultfd_file_create(int flags)
1815 {
1816 	struct file *file;
1817 	struct userfaultfd_ctx *ctx;
1818 
1819 	BUG_ON(!current->mm);
1820 
1821 	/* Check the UFFD_* constants for consistency.  */
1822 	BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
1823 	BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);
1824 
1825 	file = ERR_PTR(-EINVAL);
1826 	if (flags & ~UFFD_SHARED_FCNTL_FLAGS)
1827 		goto out;
1828 
1829 	file = ERR_PTR(-ENOMEM);
1830 	ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
1831 	if (!ctx)
1832 		goto out;
1833 
1834 	atomic_set(&ctx->refcount, 1);
1835 	ctx->flags = flags;
1836 	ctx->features = 0;
1837 	ctx->state = UFFD_STATE_WAIT_API;
1838 	ctx->released = false;
1839 	ctx->mm = current->mm;
1840 	/* prevent the mm struct to be freed */
1841 	mmgrab(ctx->mm);
1842 
1843 	file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx,
1844 				  O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
1845 	if (IS_ERR(file)) {
1846 		mmdrop(ctx->mm);
1847 		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
1848 	}
1849 out:
1850 	return file;
1851 }
1852 
1853 SYSCALL_DEFINE1(userfaultfd, int, flags)
1854 {
1855 	int fd, error;
1856 	struct file *file;
1857 
1858 	error = get_unused_fd_flags(flags & UFFD_SHARED_FCNTL_FLAGS);
1859 	if (error < 0)
1860 		return error;
1861 	fd = error;
1862 
1863 	file = userfaultfd_file_create(flags);
1864 	if (IS_ERR(file)) {
1865 		error = PTR_ERR(file);
1866 		goto err_put_unused_fd;
1867 	}
1868 	fd_install(fd, file);
1869 
1870 	return fd;
1871 
1872 err_put_unused_fd:
1873 	put_unused_fd(fd);
1874 
1875 	return error;
1876 }
1877 
1878 static int __init userfaultfd_init(void)
1879 {
1880 	userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
1881 						sizeof(struct userfaultfd_ctx),
1882 						0,
1883 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1884 						init_once_userfaultfd_ctx);
1885 	return 0;
1886 }
1887 __initcall(userfaultfd_init);
1888