xref: /linux/mm/hmm.c (revision 17ffdc482982af92bddb59692af1c5e1de23d184)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	unsigned int		flags;
};

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
		struct hmm_range *range, enum hmm_pfn_value_e value)
{
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[value];

	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not?
 * @write_fault: is it a write fault?
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!fault && !write_fault);
	hmm_vma_walk->last = addr;

	if (!vma)
		goto out_error;

	if (write_fault) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE, i++)
		if (handle_mm_fault(vma, addr, fault_flags) & VM_FAULT_ERROR)
			goto out_error;

	return -EBUSY;

out_error:
	pfns[i] = range->values[HMM_PFN_ERROR];
	return -EFAULT;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
		return;

	/*
	 * We consider not only the individual per-page request but also the
	 * default flags requested for the whole range. The API can be used
	 * two ways: the first where the HMM user coalesces multiple page
	 * faults into one request and sets flags per pfn for those faults,
	 * and the second where the HMM user wants to pre-fault a range with
	 * specific flags. For the latter it would be a waste to have the
	 * user pre-fill the pfn array with a default flags value. (See the
	 * example sketch after this function.)
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;

	/* If the CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Do we need a write fault? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}

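/*
 * Illustrative sketch (not part of the upstream file): how a driver might
 * set up the two usage modes described above, following the pattern in
 * Documentation/vm/hmm.rst for this version of the API. The driver_flags
 * and driver_values tables and the pfn index "i" are hypothetical.
 *
 *	range.flags = driver_flags;
 *	range.values = driver_values;
 *
 *	// Pre-fault mode: request at least read access for every page in
 *	// the range, ignoring whatever is pre-filled in range.pfns[].
 *	range.default_flags = range.flags[HMM_PFN_VALID];
 *	range.pfn_flags_mask = 0;
 *
 *	// Per-pfn mode: same default, but additionally honor a per-page
 *	// write request the caller set in range.pfns[i].
 *	range.default_flags = range.flags[HMM_PFN_VALID];
 *	range.pfn_flags_mask = range.flags[HMM_PFN_WRITE];
 *	range.pfns[i] = range.flags[HMM_PFN_WRITE];
 */
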
static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if (*write_fault)
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	if (fault || write_fault)
		return hmm_vma_fault(addr, end, fault, write_fault, walk);
	hmm_vma_walk->last = addr;
	return hmm_pfns_fill(addr, end, range, HMM_PFN_NONE);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (fault || write_fault)
		return hmm_vma_fault(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Never fault in device private pages, but just report
		 * the PFN even if not present.
		 */
		if (is_device_private_entry(entry)) {
			*pfn = hmm_device_entry_from_pfn(range,
					    swp_offset(entry));
			*pfn |= range->flags[HMM_PFN_VALID];
			if (is_write_device_private_entry(entry))
				*pfn |= range->flags[HMM_PFN_WRITE];
			return 0;
		}

		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0, &fault,
				   &write_fault);
		if (!fault && !write_fault)
			return 0;

		if (!non_swap_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags, &fault,
			   &write_fault);
	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap)) {
			pte_unmap(ptep);
			return -EBUSY;
		}
	}

	/*
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0, &fault,
				   &write_fault);
		if (fault || write_fault) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*pfn = range->values[HMM_PFN_SPECIAL];
		return 0;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = &range->pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	bool fault, write_fault;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
	}

	if (!pmd_present(pmd)) {
		hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
				     &write_fault);
		if (fault || write_fault)
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd_lock here; even if some other thread
		 * is splitting the huge pmd, we will get that event through
		 * the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one, then compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd)) {
		hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
				     &write_fault);
		if (fault || write_fault)
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage get_dev_pagemap() optimization which
		 * will not re-take a reference on a pgmap if we already have
		 * one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, fault, write_fault,
						  walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap)) {
				ret = -EBUSY;
				goto out_unlock;
			}
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		spin_unlock(ptl);
		return hmm_vma_fault(addr, end, fault, write_fault, walk);
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;
	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	/*
	 * Skip vma ranges that don't have struct page backing them or map I/O
	 * devices directly.
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 */
	if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) ||
	    !(vma->vm_flags & VM_READ)) {
		bool fault, write_fault;

		/*
		 * Check to see if a fault is requested for any page in the
		 * range.
		 */
		hmm_range_need_fault(hmm_vma_walk, range->pfns +
					((start - range->start) >> PAGE_SHIFT),
					(end - start) >> PAGE_SHIFT,
					0, &fault, &write_fault);
		if (fault || write_fault)
			return -EFAULT;

		hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
		hmm_vma_walk->last = end;

		/* Skip this vma and continue processing the next vma. */
		return 1;
	}

	return 0;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some addresses in a virtual address range
 * @range:	range being faulted
 * @flags:	HMM_FAULT_* flags
 *
 * Return: the number of valid pages in range->pfns[] (from range start
 * address), which may be zero.  On error one of the following status codes
 * can be returned:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	Either no valid vma covers part of the range, or it is illegal
 *		to access some part of that range.
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, unsigned int flags)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
		.flags = flags,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	lockdep_assert_held(&mm->mmap_sem);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
	} while (ret == -EBUSY);

	if (ret)
		return ret;
	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
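
/*
 * Illustrative usage sketch (not part of the upstream file), loosely based
 * on the driver pattern documented in Documentation/vm/hmm.rst for this
 * kernel version. The mmu_interval_notifier "mni", the driver page table
 * lock helpers, and driver_update_device_page_table() are hypothetical
 * stand-ins for driver-specific pieces.
 *
 *	long driver_populate_range(struct mmu_interval_notifier *mni,
 *				   struct hmm_range *range)
 *	{
 *		struct mm_struct *mm = mni->mm;
 *		long ret;
 *
 *		range->notifier = mni;
 *	again:
 *		range->notifier_seq = mmu_interval_read_begin(mni);
 *		down_read(&mm->mmap_sem);
 *		ret = hmm_range_fault(range, 0);
 *		up_read(&mm->mmap_sem);
 *		if (ret < 0) {
 *			if (ret == -EBUSY)
 *				goto again;
 *			return ret;
 *		}
 *
 *		take_driver_page_table_lock();
 *		if (mmu_interval_read_retry(mni, range->notifier_seq)) {
 *			release_driver_page_table_lock();
 *			goto again;
 *		}
 *		// use range->pfns[] to update the device page table
 *		driver_update_device_page_table(range);
 *		release_driver_page_table_lock();
 *		return 0;
 *	}
 */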