xref: /linux/mm/hmm.c (revision 45050692dec83a67c0325535aae984f56560e3a9)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	unsigned int		flags;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	if (!vma)
		goto err;

	if (write_fault)
		flags |= FAULT_FLAG_WRITE;

	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_ERROR)
		goto err;

	return -EBUSY;

err:
	*pfn = range->values[HMM_PFN_ERROR];
	return -EFAULT;
}

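/* Fill the pfn array slots covering addr..end with one canonical range value. */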
static int hmm_pfns_fill(unsigned long addr, unsigned long end,
		struct hmm_range *range, enum hmm_pfn_value_e value)
{
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[value];

	return 0;
}

/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not?
 * @write_fault: is it a write fault?
 * @walk: mm_walk structure
 * Return: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;

	if (write_fault && walk->vma && !(walk->vma->vm_flags & VM_WRITE))
		return -EPERM;

	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
		return;

	/*
	 * We not only consider the individual per-page request, we also
	 * consider the default flags requested for the range. The API can
	 * be used two ways. In the first, the HMM user coalesces multiple
	 * page faults into one request and sets flags per pfn for those
	 * faults. In the second, the HMM user wants to pre-fault a range
	 * with specific flags. For the latter it is a waste to have the
	 * user pre-fill the pfn array with a default flags value.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;
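	/*
	 * A minimal sketch of the second usage mode, as hypothetical caller
	 * code (cf. Documentation/vm/hmm.rst): to pre-fault the whole range
	 * writable without pre-filling the pfn array, a driver would set
	 *
	 *	range->default_flags = range->flags[HMM_PFN_VALID] |
	 *			       range->flags[HMM_PFN_WRITE];
	 *	range->pfn_flags_mask = 0;
	 *
	 * while the first mode sets pfn_flags_mask to ~0ULL, leaves
	 * default_flags at 0, and encodes the wanted access per pfn entry.
	 */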

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If the CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Do we need to write fault? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

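/*
 * Resolve a single pte into *pfn, faulting via hmm_vma_walk_hole_() when the
 * requested access cannot be satisfied. Returns 0 on success; on any non-zero
 * return (-EBUSY to retry, or a fault error) the pte has been unmapped.
 */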
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * This is a special swap entry: ignore migration, handle
		 * device-private entries, and report anything else as error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_device_entry_from_pfn(range,
					    swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0, &fault,
				   &write_fault);
		if (!fault && !write_fault)
			return 0;

		if (!non_swap_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags, &fault,
			   &write_fault);
	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap)) {
			pte_unmap(ptep);
			return -EBUSY;
		}
	}

	/*
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0, &fault,
				   &write_fault);
		if (fault || write_fault) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*pfn = range->values[HMM_PFN_SPECIAL];
		return 0;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = &range->pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	bool fault, write_fault;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
	}

	if (!pmd_present(pmd)) {
		hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
				     &write_fault);
		if (fault || write_fault)
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd_lock here: even if some other thread
		 * is splitting the huge pmd, we will get that event through
		 * the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd)) {
		hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
				     &write_fault);
		if (fault || write_fault)
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage the get_dev_pagemap() optimization,
		 * which will not re-take a reference on a pgmap if we already
		 * have one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole_(addr, end, fault, write_fault,
						  walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap)) {
				ret = -EBUSY;
				goto out_unlock;
			}
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
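/*
 * A hugetlb pte covers many PAGE_SIZE slots in range->pfns, so expand the
 * single pte into one device entry per PAGE_SIZE page.
 */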
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;
	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	/*
	 * Skip vma ranges that don't have struct page backing them or map I/O
	 * devices directly.
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 */
	if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) ||
	    !(vma->vm_flags & VM_READ)) {
		bool fault, write_fault;

		/*
		 * Check to see if a fault is requested for any page in the
		 * range.
		 */
		hmm_range_need_fault(hmm_vma_walk, range->pfns +
					((start - range->start) >> PAGE_SHIFT),
					(end - start) >> PAGE_SHIFT,
					0, &fault, &write_fault);
		if (fault || write_fault)
			return -EFAULT;

		hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
		hmm_vma_walk->last = end;

		/* Skip this vma and continue processing the next vma. */
		return 1;
	}

	return 0;
}

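/*
 * Callback table for walk_page_range(): hmm_vma_walk_test() runs first for
 * each vma and may skip it entirely; the entry callbacks fill range->pfns.
 */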
static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:	range being faulted
 * @flags:	HMM_FAULT_* flags
 *
 * Return: the number of valid pages in range->pfns[] (from range start
 * address), which may be zero.  On error one of the following status codes
 * can be returned:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	Invalid range (i.e., either there is no valid vma or it is
 *		illegal to access that range).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, the function marks the HMM pfn entry corresponding to the failing
 * virtual address with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, unsigned int flags)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
		.flags = flags,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	lockdep_assert_held(&mm->mmap_sem);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
	} while (ret == -EBUSY);

	if (ret)
		return ret;
	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
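
/*
 * A minimal usage sketch (hypothetical driver code, not an API defined in
 * this file) of the retry loop callers are expected to build around
 * hmm_range_fault(); take_driver_lock()/release_driver_lock() stand in for
 * whatever lock serializes the device page table update against
 * invalidation:
 *
 *	long ret;
 *
 *	do {
 *		range->notifier_seq = mmu_interval_read_begin(range->notifier);
 *		down_read(&mm->mmap_sem);
 *		ret = hmm_range_fault(range, 0);
 *		up_read(&mm->mmap_sem);
 *		if (ret < 0) {
 *			if (ret == -EBUSY)
 *				continue;
 *			return ret;
 *		}
 *		take_driver_lock();
 *		if (mmu_interval_read_retry(range->notifier,
 *					    range->notifier_seq)) {
 *			release_driver_lock();
 *			continue;
 *		}
 *		break;
 *	} while (true);
 *
 *	... program the device page table from range->pfns[], then ...
 *	release_driver_lock();
 */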