// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	unsigned int		flags;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	if (!vma)
		goto err;

	if (write_fault)
		flags |= FAULT_FLAG_WRITE;

	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_ERROR)
		goto err;

	return -EBUSY;

err:
	*pfn = range->values[HMM_PFN_ERROR];
	return -EFAULT;
}

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
		struct hmm_range *range, enum hmm_pfn_value_e value)
{
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[value];

	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not?
 * @write_fault: write fault?
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 bool fault, bool write_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	WARN_ON_ONCE(!fault && !write_fault);
	hmm_vma_walk->last = addr;

	if (write_fault && walk->vma && !(walk->vma->vm_flags & VM_WRITE))
		return -EPERM;

	for (; addr < end; addr += PAGE_SIZE, i++) {
		int ret;

		ret = hmm_vma_do_fault(walk, addr, write_fault, &pfns[i]);
		if (ret != -EBUSY)
			return ret;
	}

	return -EBUSY;
}
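
/*
 * Note on the return convention: returning -EBUSY after successfully
 * faulting pages is deliberate. hmm_range_fault() restarts
 * walk_page_range() from hmm_vma_walk->last whenever it sees -EBUSY, so
 * the newly faulted entries are collected on the retry.
 */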

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
		return;

	/*
	 * We consider not only the individual per-page request, but also the
	 * default flags requested for the whole range. The API can be used in
	 * two ways: in the first, the HMM user coalesces multiple page faults
	 * into one request and sets flags per pfn for those faults; in the
	 * second, the HMM user wants to pre-fault a range with specific
	 * flags. For the latter it would be a waste to have the user pre-fill
	 * the pfn array with a default flags value (see the example after
	 * this function).
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if (cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
		/* Do we fault on device memory? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
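
/*
 * Example (a sketch, not part of this file): for the pre-fault mode
 * described above, a driver wanting at least read access for the whole
 * range would set, assuming hypothetical bit encodings for its device
 * page table format:
 *
 *	range.flags[HMM_PFN_VALID] = 1UL << 63;	(assumed encoding)
 *	range.flags[HMM_PFN_WRITE] = 1UL << 62;	(assumed encoding)
 *	range.default_flags = range.flags[HMM_PFN_VALID];
 *	range.pfn_flags_mask = 0;
 *
 * With pfn_flags_mask cleared, the per-pfn input values are ignored and
 * only default_flags feeds the fault decision, so the caller does not
 * have to pre-fill the pfns array.
 */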

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if (*write_fault)
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	if (fault || write_fault)
		return hmm_vma_fault(addr, end, fault, write_fault, walk);
	hmm_vma_walk->last = addr;
	return hmm_pfns_fill(addr, end, range, HMM_PFN_NONE);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (fault || write_fault)
		return hmm_vma_fault(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * This is a special swap entry: ignore migration, handle
		 * device private memory, and report anything else as an
		 * error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_device_entry_from_pfn(range,
					    swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0, &fault,
				   &write_fault);
		if (!fault && !write_fault)
			return 0;

		if (!non_swap_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags, &fault,
			   &write_fault);
	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap)) {
			pte_unmap(ptep);
			return -EBUSY;
		}
	}

	/*
	 * Since each architecture defines a struct page for the zero page,
	 * just fall through and treat it like a normal page.
	 */
	if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0, &fault,
				   &write_fault);
		if (fault || write_fault) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*pfn = range->values[HMM_PFN_SPECIAL];
		return 0;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = &range->pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	bool fault, write_fault;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
	}

	if (!pmd_present(pmd)) {
		hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
				     &write_fault);
		if (fault || write_fault)
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * mmu_notifier callback.
		 *
		 * So just read the pmd value again, check that it is still a
		 * transparent huge or device mapping, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point it is either a
	 * valid pmd entry pointing to a pte directory or a bad pmd that will
	 * not recover.
	 */
	if (pmd_bad(pmd)) {
		hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
				     &write_fault);
		if (fault || write_fault)
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage get_dev_pagemap() optimization which
		 * will not re-take a reference on a pgmap if we already have
		 * one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, fault, write_fault,
					     walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap)) {
				ret = -EBUSY;
				goto out_unlock;
			}
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		spin_unlock(ptl);
		return hmm_vma_fault(addr, end, fault, write_fault, walk);
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;
	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	/*
	 * Skip vma ranges that don't have struct page backing them or map I/O
	 * devices directly.
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 */
	if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) ||
	    !(vma->vm_flags & VM_READ)) {
		bool fault, write_fault;

		/*
		 * Check to see if a fault is requested for any page in the
		 * range.
		 */
		hmm_range_need_fault(hmm_vma_walk, range->pfns +
					((start - range->start) >> PAGE_SHIFT),
					(end - start) >> PAGE_SHIFT,
					0, &fault, &write_fault);
		if (fault || write_fault)
			return -EFAULT;

		hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
		hmm_vma_walk->last = end;

		/* Skip this vma and continue processing the next vma. */
		return 1;
	}

	return 0;
}
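
/*
 * Dispatch table for walk_page_range(): hmm_vma_walk_test() runs first
 * for each vma and may skip it wholesale; otherwise the walker invokes
 * pud_entry or pmd_entry for populated page table levels, pte_hole for
 * unpopulated ranges, and hugetlb_entry for hugetlb vmas.
 */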
static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:	range being faulted
 * @flags:	HMM_FAULT_* flags
 *
 * Return: the number of valid pages in range->pfns[] (from range start
 * address), which may be zero.  On error one of the following status codes
 * can be returned:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	Invalid range (i.e., either no vma covers it or it is illegal
 *		to access it).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, the function marks the HMM pfn entry corresponding to the
 * faulting virtual address with an error flag. See the example caller
 * sketched after this function.
 */
long hmm_range_fault(struct hmm_range *range, unsigned int flags)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
		.flags = flags,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	lockdep_assert_held(&mm->mmap_sem);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
	} while (ret == -EBUSY);

	if (ret)
		return ret;
	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
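
/*
 * Example caller (a condensed sketch of the retry pattern documented in
 * Documentation/vm/hmm.rst; driver_lock and the device page table update
 * are hypothetical placeholders, not part of this file):
 *
 *	long driver_populate_range(struct hmm_range *range)
 *	{
 *		long ret;
 *
 *	again:
 *		range->notifier_seq = mmu_interval_read_begin(range->notifier);
 *		down_read(&range->notifier->mm->mmap_sem);
 *		ret = hmm_range_fault(range, 0);
 *		up_read(&range->notifier->mm->mmap_sem);
 *		if (ret < 0) {
 *			if (ret == -EBUSY)
 *				goto again;
 *			return ret;
 *		}
 *
 *		mutex_lock(&driver_lock);
 *		if (mmu_interval_read_retry(range->notifier,
 *					    range->notifier_seq)) {
 *			mutex_unlock(&driver_lock);
 *			goto again;
 *		}
 *		... program the device page table from range->pfns ...
 *		mutex_unlock(&driver_lock);
 *		return 0;
 *	}
 */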