/* xref: /linux/mm/gup.c (revision 372e2db7210df7c45ead46429aeb1443ba148060) */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "internal.h"

static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct dev_pagemap *pgmap = NULL;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
		/*
		 * Only return device mapping pages in the FOLL_GET case since
		 * they are only valid while holding the pgmap reference.
		 */
		pgmap = get_dev_pagemap(pte_pfn(pte), NULL);
		if (pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			int ret;

			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
		int ret;
		get_page(page);
		pte_unmap_unlock(ptep, ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		if (ret)
			return ERR_PTR(ret);
		goto retry;
	}

	if (flags & FOLL_GET) {
		get_page(page);

		/* drop the pgmap reference now that we hold the page */
		if (pgmap) {
			put_dev_pagemap(pgmap);
			pgmap = NULL;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();  /* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return no_page_table(vma, flags);
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
		return no_page_table(vma, flags);
	if (pmd_devmap(*pmd)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(*pmd)))
		return follow_page_pte(vma, address, pmd, flags);

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags);
	}
	if (flags & FOLL_SPLIT) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (pmd_none(*pmd))
				return no_page_table(vma, flags);
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags);
	}

	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	*page_mask = HPAGE_PMD_NR - 1;
	return page;
}
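
/*
 * Editor's note: a minimal usage sketch for follow_page_mask(), not part
 * of the original file.  It assumes a caller that already holds mmap_sem
 * for read; "mm" and "addr" are hypothetical locals.  With FOLL_GET the
 * returned page carries a reference that must be dropped with put_page().
 *
 *	unsigned int page_mask;
 *	struct vm_area_struct *vma;
 *	struct page *page;
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma && addr >= vma->vm_start) {
 *		page = follow_page_mask(vma, addr, FOLL_GET, &page_mask);
 *		if (page && !IS_ERR(page))
 *			put_page(page);		/* drop the FOLL_GET ref */
 *	}
 *	up_read(&mm->mmap_sem);
 */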

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	unsigned int fault_flags = 0;
	int ret;

	/* mlock all present pages, but do not fault in new pages */
	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
		return -ENOENT;
	/* For mm_populate(), just skip the stack guard page. */
	if ((*flags & FOLL_POPULATE) &&
			(stack_guard_page_start(vma, address) ||
			 stack_guard_page_end(vma, address + PAGE_SIZE)))
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (*flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
			return -EFAULT;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking)
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also want to write to the user page we got, which a
	 * read fault here might prevent (a readonly page might get reCOWed
	 * by a userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags |= FOLL_COW;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (write) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * this case.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long i = 0;
	unsigned int page_mask;
	struct vm_area_struct *vma = NULL;

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration, or we crossed a vma boundary */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				int ret;
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					return i ? : ret;
				page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags))
				return i ? : -EFAULT;
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (unlikely(fatal_signal_pending(current)))
			return i ? i : -ERESTARTSYS;
		cond_resched();
		page = follow_page_mask(vma, start, foll_flags, &page_mask);
		if (!page) {
			int ret;
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				return i ? i : ret;
			case -EBUSY:
				return i;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page.
			 */
			goto next_page;
		} else if (IS_ERR(page)) {
			return i ? i : PTR_ERR(page);
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
	return i;
}

static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags:flags to pass down to handle_mm_fault()
 * @unlocked:	did we unlock the mmap_sem while retrying, maybe NULL if caller
 *		does not allow retry
 *
 * This is meant to be called in the specific scenario where, for locking
 * reasons, we tried to access user memory in atomic context (within a
 * pagefault_disable() section); that access returned -EFAULT, and we want
 * to resolve the user fault before trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_sem. So it does not
 * have the same semantics wrt @mm->mmap_sem as filemap_fault() does.
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	int ret, major = 0;

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;

retry:
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	ret = handle_mm_fault(vma, address, fault_flags);
	major |= ret & VM_FAULT_MAJOR;
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return -EHWPOISON;
		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
			return -EFAULT;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		down_read(&mm->mmap_sem);
		if (!(fault_flags & FAULT_FLAG_TRIED)) {
			*unlocked = true;
			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			fault_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	if (tsk) {
		if (major)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);
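
/*
 * Editor's note: a hedged usage sketch, not part of the original file.
 * It mirrors the futex-style pattern described above: an access under
 * pagefault_disable() failed with -EFAULT, so the fault is resolved here
 * and the atomic access is retried.  "mm", "uaddr" and the retry label
 * are hypothetical; the function returns with mmap_sem still held.
 *
 *	int ret;
 *	bool unlocked = false;
 *
 *	down_read(&mm->mmap_sem);
 *	ret = fixup_user_fault(current, mm, uaddr, FAULT_FLAG_WRITE,
 *			       &unlocked);
 *	up_read(&mm->mmap_sem);
 *	if (!ret)
 *		goto retry_atomic_access;	(hypothetical label)
 */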

static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked, bool notify_drop,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (pages)
		flags |= FOLL_GET;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (!pages)
			/* If it's a prefault don't insist harder */
			return ret;

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/* VM_FAULT_RETRY didn't trigger */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
		pages += ret;
		start += ret << PAGE_SHIFT;

		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * without FAULT_FLAG_ALLOW_RETRY but with
		 * FAULT_FLAG_TRIED.
		 */
		*locked = 1;
		lock_dropped = true;
		down_read(&mm->mmap_sem);
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, NULL);
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		pages++;
		start += PAGE_SIZE;
	}
	if (notify_drop && lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages(..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  to:
 *
 *      int locked = 1;
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages_locked(..., pages, &locked);
 *      if (locked)
 *          up_read(&mm->mmap_sem);
 */
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   int *locked)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, NULL, locked, true,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);

/*
 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows the
 * caller to specify tsk and mm.
 *
 * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
 * caller if required (just like with __get_user_pages). "FOLL_GET"
 * is set implicitly if "pages" is non-NULL.
 */
__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
					       unsigned long start, unsigned long nr_pages,
					       struct page **pages, unsigned int gup_flags)
{
	long ret;
	int locked = 1;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
				      &locked, false, gup_flags);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(__get_user_pages_unlocked);

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      get_user_pages(..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  with:
 *
 *      get_user_pages_unlocked(..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast so
 * get_user_pages_fast should be used instead if specific gup_flags
 * (e.g. FOLL_FORCE) are not required.
 */
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
					 pages, gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_unlocked);
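
/*
 * Editor's note: a minimal sketch of the unlocked form, not part of the
 * original file.  The caller never touches mmap_sem; "uaddr" is a
 * hypothetical user address.
 *
 *	struct page *page;
 *	long ret;
 *
 *	ret = get_user_pages_unlocked(uaddr & PAGE_MASK, 1, &page, 0);
 *	if (ret == 1) {
 *		... read the data via kmap(page)/kunmap(page) ...
 *		put_page(page);
 *	}
 */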

/*
 * get_user_pages_remote() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
 * be called after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 *
 * get_user_pages should be phased out in favor of
 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 * should use get_user_pages because it cannot pass
 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 */
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
				       NULL, false,
				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
}
EXPORT_SYMBOL(get_user_pages_remote);
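
/*
 * Editor's note: a hedged sketch of remote pinning, loosely modelled on
 * the ptrace access path; not part of the original file, and "tsk", "mm"
 * and "addr" are hypothetical.  The mmap_sem of the *target* mm must be
 * held across the call and for as long as the vma is used.
 *
 *	struct page *page;
 *	struct vm_area_struct *vma;
 *	long ret;
 *
 *	down_read(&mm->mmap_sem);
 *	ret = get_user_pages_remote(tsk, mm, addr & PAGE_MASK, 1,
 *				    FOLL_WRITE, &page, &vma);
 *	if (ret == 1) {
 *		... modify the page contents via kmap() ...
 *		set_page_dirty_lock(page);
 *		put_page(page);
 *	}
 *	up_read(&mm->mmap_sem);
 */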

/*
 * This is the same as get_user_pages_remote(), just with a
 * less-flexible calling convention where we assume that the task
 * and mm being operated on are the current task's.  We also
 * obviously don't pass FOLL_REMOTE in here.
 */
long get_user_pages(unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, vmas, NULL, false,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
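
/*
 * Editor's note: a hedged sketch of the pin/dirty/release pattern
 * described in the comment block above get_user_pages_remote(); not part
 * of the original file, "uaddr" is hypothetical.
 *
 *	struct page *page;
 *	long ret;
 *
 *	down_read(&current->mm->mmap_sem);
 *	ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, &page, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (ret < 1)
 *		return ret ? ret : -EFAULT;
 *	... DMA into the page, or write to it via kmap() ...
 *	set_page_dirty_lock(page);
 *	put_page(page);
 */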

/**
 * populate_vma_page_range() - populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @nonblocking: if non-NULL, *@nonblocking is set to 0 when mmap_sem
 *		is released (see below)
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held.
 *
 * If @nonblocking is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @nonblocking is non-NULL, it must be held for read only and may be
 * released.  If it's released, *@nonblocking will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);

	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
	if (vma->vm_flags & VM_LOCKONFAULT)
		gup_flags &= ~FOLL_POPULATE;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}
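
/*
 * Editor's note (sketch, not part of the original file): mlock() and
 * MAP_POPULATE reach this via the mm_populate() inline wrapper in
 * <linux/mm.h>, which deliberately ignores errors:
 *
 *	mm_populate(addr, len);
 *		which expands to roughly:
 *	(void)__mm_populate(addr, len, 1);
 */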

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */

/*
 * Generic RCU Fast GUP
 *
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks. Thus the walker needs to be
 * protected from page table pages being freed from under it, and should
 * block any THP splits.
 *
 * One way to achieve this is to have the walker disable interrupts, and
 * rely on IPIs from the TLB flushing code blocking before the page table
 * pages are freed. This is unsuitable for architectures that do not need
 * to broadcast an IPI when invalidating TLBs.
 *
 * Another way to achieve this is to batch up the page-table pages belonging
 * to more than one mm_user, then schedule an rcu_sched callback to free
 * those pages. Disabling interrupts will allow the fast_gup walker to both
 * block the rcu_sched callback, and an IPI that we broadcast for splitting
 * THPs (which is a relatively rare event). The code below adopts this
 * strategy.
 *
 * Before activating this code, please be aware that the following assumptions
 * are currently made:
 *
 *  *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free
 *      pages containing page tables.
 *
 *  *) ptes can be read atomically by the architecture.
 *
 *  *) access_ok is sufficient to validate userspace address ranges.
 *
 * The last two assumptions can be relaxed by the addition of helper functions.
 *
 * This code is based heavily on the PowerPC implementation by Nick Piggin.
 */
#ifdef CONFIG_HAVE_GENERIC_RCU_GUP

#ifdef __HAVE_ARCH_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	pte_t *ptep, *ptem;
	int ret = 0;

	ptem = ptep = pte_offset_map(&pmd, addr);
	do {
		/*
		 * In the line below we are assuming that the pte can be read
		 * atomically. If this is not the case for your architecture,
		 * please wrap this in a helper function!
		 *
		 * for an example see gup_get_pte in arch/x86/mm/gup.c
		 */
		pte_t pte = READ_ONCE(*ptep);
		struct page *head, *page;

		/*
		 * Similar to the PMD case below, NUMA hinting must take slow
		 * path using the pte_protnone check.
		 */
		if (!pte_present(pte) || pte_special(pte) ||
			pte_protnone(pte) || (write && !pte_write(pte)))
			goto pte_unmap;

		if (!arch_pte_access_permitted(pte, write))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		head = compound_head(page);

		if (!page_cache_get_speculative(head))
			goto pte_unmap;

		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			goto pte_unmap;
		}

		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	pte_unmap(ptem);
	return ret;
}
#else

/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * __get_user_pages_fast implementation that can pin pages. Thus it's still
 * useful to have gup_huge_pmd even if we can't operate on ptes.
 */
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	return 0;
}
#endif /* __HAVE_ARCH_PTE_SPECIAL */

static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (write && !pmd_write(orig))
		return 0;

	refs = 0;
	head = pmd_page(orig);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (write && !pud_write(orig))
		return 0;

	refs = 0;
	head = pud_page(orig);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
			unsigned long end, int write,
			struct page **pages, int *nr)
{
	int refs;
	struct page *head, *page;

	if (write && !pgd_write(orig))
		return 0;

	refs = 0;
	head = pgd_page(orig);
	page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;

		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;

			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
				pages, nr))
				return 0;

		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
			/*
			 * Architectures can use a different format for
			 * hugetlbfs pmds than for THP pmds.
			 */
			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
					 PMD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
				return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_huge(pud))) {
			if (!gup_huge_pud(pud, pudp, addr, next, write,
					  pages, nr))
				return 0;
		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
					 PUD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
 * the regular GUP. It will only return non-negative values.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					start, len)))
		return 0;

	/*
	 * Disable interrupts.  We use the nested form as we can already have
	 * interrupts disabled by get_futex_key.
	 *
	 * With interrupts disabled, we block page table pages from being
	 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
	 * for more details.
	 *
	 * We do not adopt an rcu_read_lock(.) here as we also want to
	 * block IPIs that come from THPs splitting.
	 */

	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = READ_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (unlikely(pgd_huge(pgd))) {
			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
					  pages, &nr))
				break;
		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
					 PGDIR_SHIFT, next, write, pages, &nr))
				break;
		} else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}
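
/*
 * Editor's note: a hedged sketch of the IRQ-safe variant, in the spirit
 * of the get_futex_key() usage mentioned above (names hypothetical; not
 * part of the original file).  A short count just means "fall back to
 * the slow path", never an error.
 *
 *	struct page *page;
 *
 *	if (__get_user_pages_fast(address, 1, 1, &page) != 1)
 *		return do_slow_path(address);	(hypothetical helper)
 *	... use the page ...
 *	put_page(page);
 */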

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	int nr, ret;

	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	ret = nr;

	if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
				write ? FOLL_WRITE : 0);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}
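
/*
 * Editor's note: a minimal direct-IO style sketch, not part of the
 * original file ("uaddr" and NR_PIN are hypothetical).  The caller does
 * no mmap_sem handling at all.
 *
 *	struct page *pages[NR_PIN];
 *	int i, got;
 *
 *	got = get_user_pages_fast(uaddr, NR_PIN, 1, pages);
 *	if (got < 0)
 *		return got;
 *	for (i = 0; i < got; i++) {
 *		... read/write pages[i] ...
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 */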

#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */