xref: /linux/mm/hmm.c (revision 7b86ac3371b70c3fd8fd95501719beb1faab719f)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

static struct mmu_notifier *hmm_alloc_notifier(struct mm_struct *mm)
{
	struct hmm *hmm;

	hmm = kzalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return ERR_PTR(-ENOMEM);

	init_waitqueue_head(&hmm->wq);
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->ranges_lock);
	hmm->notifiers = 0;
	return &hmm->mmu_notifier;
}

static void hmm_free_notifier(struct mmu_notifier *mn)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);

	WARN_ON(!list_empty(&hmm->ranges));
	WARN_ON(!list_empty(&hmm->mirrors));
	kfree(hmm);
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;

	/*
	 * Since hmm_range_register() holds an mmget() reference, hmm_release()
	 * is prevented as long as a range exists.
	 */
	WARN_ON(!list_empty_careful(&hmm->ranges));

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		/*
		 * Note: The driver is not allowed to trigger
		 * hmm_mirror_unregister() from this thread.
		 */
		if (mirror->ops->release)
			mirror->ops->release(mirror);
	}
	up_read(&hmm->mirrors_sem);
}

static void notifiers_decrement(struct hmm *hmm)
{
	unsigned long flags;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	hmm->notifiers--;
	if (!hmm->notifiers) {
		struct hmm_range *range;

		list_for_each_entry(range, &hmm->ranges, list) {
			if (range->valid)
				continue;
			range->valid = true;
		}
		wake_up_all(&hmm->wq);
	}
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;
	struct hmm_range *range;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	hmm->notifiers++;
	list_for_each_entry(range, &hmm->ranges, list) {
		if (nrange->end < range->start || nrange->start >= range->end)
			continue;

		range->valid = false;
	}
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	if (mmu_notifier_range_blockable(nrange))
		down_read(&hmm->mirrors_sem);
	else if (!down_read_trylock(&hmm->mirrors_sem)) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int rc;

		rc = mirror->ops->sync_cpu_device_pagetables(mirror, nrange);
		if (rc) {
			if (WARN_ON(mmu_notifier_range_blockable(nrange) ||
			    rc != -EAGAIN))
				continue;
			ret = -EAGAIN;
			break;
		}
	}
	up_read(&hmm->mirrors_sem);

out:
	if (ret)
		notifiers_decrement(hmm);
	return ret;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);

	notifiers_decrement(hmm);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
	.alloc_notifier		= hmm_alloc_notifier,
	.free_notifier		= hmm_free_notifier,
};
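
/*
 * Illustrative sketch (not part of this file): a mirror's
 * sync_cpu_device_pagetables() callback must honor the blockable flag of the
 * invalidation and may only return -EAGAIN for non-blockable invalidations.
 * All names below (my_mirror, dev_lock, my_invalidate_dev_tlb) are
 * hypothetical:
 *
 *	static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *			const struct mmu_notifier_range *nrange)
 *	{
 *		struct my_mirror *m = container_of(mirror, struct my_mirror,
 *						   mirror);
 *
 *		if (mmu_notifier_range_blockable(nrange))
 *			mutex_lock(&m->dev_lock);
 *		else if (!mutex_trylock(&m->dev_lock))
 *			return -EAGAIN;
 *		my_invalidate_dev_tlb(m, nrange->start, nrange->end);
 *		mutex_unlock(&m->dev_lock);
 *		return 0;
 *	}
 */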

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * The caller cannot unregister the hmm_mirror while any ranges are
 * registered.
 *
 * Callers using this function must put a call to mmu_notifier_synchronize()
 * in their module exit functions.
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	struct mmu_notifier *mn;

	lockdep_assert_held_write(&mm->mmap_sem);

	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mn = mmu_notifier_get_locked(&hmm_mmu_notifier_ops, mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);
	mirror->hmm = container_of(mn, struct hmm, mmu_notifier);

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = mirror->hmm;

	down_write(&hmm->mirrors_sem);
	list_del(&mirror->list);
	up_write(&hmm->mirrors_sem);
	mmu_notifier_put(&hmm->mmu_notifier);
}
EXPORT_SYMBOL(hmm_mirror_unregister);
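
/*
 * Illustrative sketch (not part of this file) of the mirror lifecycle from a
 * driver's point of view; my_mirror, my_mirror_ops, my_release and
 * my_sync_cpu_device_pagetables are hypothetical:
 *
 *	static const struct hmm_mirror_ops my_mirror_ops = {
 *		.release = my_release,
 *		.sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
 *	};
 *
 *	int my_enable_mirroring(struct my_mirror *m)
 *	{
 *		int ret;
 *
 *		m->mirror.ops = &my_mirror_ops;
 *		down_write(&current->mm->mmap_sem);
 *		ret = hmm_mirror_register(&m->mirror, current->mm);
 *		up_write(&current->mm->mmap_sem);
 *		return ret;
 *	}
 *
 * Teardown is hmm_mirror_unregister(&m->mirror) once no ranges are
 * registered, and the module exit function must call
 * mmu_notifier_synchronize() so that the hmm struct freed via
 * free_notifier() is fully released.
 */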

struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	unsigned int		flags;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	if (!vma)
		goto err;

	if (hmm_vma_walk->flags & HMM_FAULT_ALLOW_RETRY)
		flags |= FAULT_FLAG_ALLOW_RETRY;
	if (write_fault)
		flags |= FAULT_FLAG_WRITE;

	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY) {
		/* Note, handle_mm_fault() did up_read(&mm->mmap_sem) */
		return -EAGAIN;
	}
	if (ret & VM_FAULT_ERROR)
		goto err;

	return -EBUSY;

err:
	*pfn = range->values[HMM_PFN_ERROR];
	return -EFAULT;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}

/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not?
 * @write_fault: write fault?
 * @walk: mm_walk structure
 * Return: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;

	if (write_fault && walk->vma && !(walk->vma->vm_flags & VM_WRITE))
		return -EPERM;

	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
		return;

	/*
	 * We consider not only the individual per-page request but also the
	 * default flags requested for the whole range. The API can be used
	 * two ways: the HMM user either coalesces multiple page faults into
	 * one request and sets flags per pfn for those faults, or pre-faults
	 * an entire range with specific flags. For the latter it would be a
	 * waste to have the user pre-fill the pfn array with a default flags
	 * value.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If the CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Do we need to write fault? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
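
/*
 * Illustrative sketch (not part of this file) of the two ways the flags
 * machinery above is meant to be used; the range setup values are
 * hypothetical:
 *
 *	// Pre-fault a whole range read/write without touching pfns[]:
 *	range.default_flags = range.flags[HMM_PFN_VALID] |
 *			      range.flags[HMM_PFN_WRITE];
 *	range.pfn_flags_mask = 0;
 *
 *	// Or request per page: leave default_flags at 0, let pfn_flags_mask
 *	// keep the per-pfn bits, and set the wanted flags in each entry:
 *	range.default_flags = 0;
 *	range.pfn_flags_mask = ~0ULL;
 *	range.pfns[i] = range.flags[HMM_PFN_VALID];
 */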

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if (*write_fault)
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
4532aee09d8SJérôme Glisse 
45453f5c3f4SJérôme Glisse static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
45553f5c3f4SJérôme Glisse 			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
45653f5c3f4SJérôme Glisse 			      uint64_t *pfn)
45753f5c3f4SJérôme Glisse {
45853f5c3f4SJérôme Glisse 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
459f88a1e90SJérôme Glisse 	struct hmm_range *range = hmm_vma_walk->range;
4602aee09d8SJérôme Glisse 	bool fault, write_fault;
4612aee09d8SJérôme Glisse 	uint64_t cpu_flags;
46253f5c3f4SJérôme Glisse 	pte_t pte = *ptep;
463f88a1e90SJérôme Glisse 	uint64_t orig_pfn = *pfn;
46453f5c3f4SJérôme Glisse 
465f88a1e90SJérôme Glisse 	*pfn = range->values[HMM_PFN_NONE];
46673231612SJérôme Glisse 	fault = write_fault = false;
46753f5c3f4SJérôme Glisse 
46853f5c3f4SJérôme Glisse 	if (pte_none(pte)) {
46973231612SJérôme Glisse 		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
47073231612SJérôme Glisse 				   &fault, &write_fault);
4712aee09d8SJérôme Glisse 		if (fault || write_fault)
47253f5c3f4SJérôme Glisse 			goto fault;
47353f5c3f4SJérôme Glisse 		return 0;
47453f5c3f4SJérôme Glisse 	}
47553f5c3f4SJérôme Glisse 
47653f5c3f4SJérôme Glisse 	if (!pte_present(pte)) {
47753f5c3f4SJérôme Glisse 		swp_entry_t entry = pte_to_swp_entry(pte);
47853f5c3f4SJérôme Glisse 
47953f5c3f4SJérôme Glisse 		if (!non_swap_entry(entry)) {
480e3fe8e55SYang, Philip 			cpu_flags = pte_to_hmm_pfn_flags(range, pte);
481e3fe8e55SYang, Philip 			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
482e3fe8e55SYang, Philip 					   &fault, &write_fault);
4832aee09d8SJérôme Glisse 			if (fault || write_fault)
48453f5c3f4SJérôme Glisse 				goto fault;
48553f5c3f4SJérôme Glisse 			return 0;
48653f5c3f4SJérôme Glisse 		}
48753f5c3f4SJérôme Glisse 
		/*
		 * This is a special swap entry: ignore migration (unless we
		 * must fault), use the device entry, and report anything else
		 * as an error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_device_entry_from_pfn(range,
					    swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(walk->mm, pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	} else {
		cpu_flags = pte_to_hmm_pfn_flags(range, pte);
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
				   &fault, &write_fault);
	}

	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap))
			return -EBUSY;
	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
		*pfn = range->values[HMM_PFN_SPECIAL];
		return -EFAULT;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd lock here: even if some other
		 * thread is splitting the huge pmd we will get that event
		 * through the mmu_notifier callback.
		 *
		 * So just read the pmd value again, check that it is still a
		 * transparent huge or device mapping, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage the get_dev_pagemap() optimization,
		 * which will not re-take a reference on a pgmap if we already
		 * have one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start, next;
	pmd_t *pmdp;
	pud_t pud;
	int ret;

again:
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return hmm_vma_walk_hole(start, end, walk);

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud))
			return hmm_vma_walk_hole(start, end, walk);

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault)
			return hmm_vma_walk_hole_(addr, end, fault,
						write_fault, walk);

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		return 0;
	}

	split_huge_pud(walk->vma, pudp, addr);
	if (pud_none(*pudp))
		goto again;

	pmdp = pmd_offset(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
		if (ret)
			return ret;
	} while (pmdp++, addr = next, addr != end);

	return 0;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 * @range: range
 * @mirror: the mirror whose mm the range of virtual addresses belongs to
 *
 * Return: 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table see include/linux/hmm.h
 */
int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror)
{
	struct hmm *hmm = mirror->hmm;
	unsigned long flags;

	range->valid = false;
	range->hmm = NULL;

	if ((range->start & (PAGE_SIZE - 1)) || (range->end & (PAGE_SIZE - 1)))
		return -EINVAL;
	if (range->start >= range->end)
		return -EINVAL;

	/* Prevent hmm_release() from running while the range is valid */
	if (!mmget_not_zero(hmm->mmu_notifier.mm))
		return -EFAULT;

	/* Initialize range to track CPU page table updates. */
	spin_lock_irqsave(&hmm->ranges_lock, flags);

	range->hmm = hmm;
	list_add(&range->list, &hmm->ranges);

	/*
	 * If there are any concurrent notifiers we have to wait for them for
	 * the range to be valid (see hmm_range_wait_until_valid()).
	 */
	if (!hmm->notifiers)
		range->valid = true;
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	return 0;
}
EXPORT_SYMBOL(hmm_range_register);
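
/*
 * Illustrative sketch (not part of this file): registering a range and
 * waiting for it to become valid before using it; the pfns buffer and the
 * my_hmm_range_flags/my_hmm_range_values tables are hypothetical, while
 * hmm_range_wait_until_valid() and HMM_RANGE_DEFAULT_TIMEOUT come from
 * include/linux/hmm.h:
 *
 *	range.start = start;
 *	range.end = end;
 *	range.pfns = pfns;
 *	range.flags = my_hmm_range_flags;
 *	range.values = my_hmm_range_values;
 *	range.pfn_shift = PAGE_SHIFT;
 *
 *	ret = hmm_range_register(&range, &m->mirror);
 *	if (ret)
 *		return ret;
 *	if (!hmm_range_wait_until_valid(&range, HMM_RANGE_DEFAULT_TIMEOUT)) {
 *		hmm_range_unregister(&range);
 *		return -EBUSY;
 *	}
 */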

/*
 * hmm_range_unregister() - stop tracking change to CPU page table over a range
 * @range: range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
	struct hmm *hmm = range->hmm;
	unsigned long flags;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	list_del_init(&range->list);
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	/* Drop reference taken by hmm_range_register() */
	mmput(hmm->mmu_notifier.mm);

	/*
	 * The range is now invalid and the ref on the hmm is dropped, so
	 * poison the pointer.  Leave other fields in place, for the caller's
	 * use.
	 */
	range->valid = false;
	memset(&range->hmm, POISON_INUSE, sizeof(range->hmm));
}
EXPORT_SYMBOL(hmm_range_unregister);

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:	range being faulted
 * @flags:	HMM_FAULT_* flags
 *
 * Return: the number of valid pages in range->pfns[] (from range start
 * address), which may be zero.  On error one of the following status codes
 * can be returned:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EAGAIN:	A page fault needs to be retried and mmap_sem was dropped.
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	Invalid (i.e., either no valid vma or it is illegal to access
 *		that range).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, unsigned int flags)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	int ret;

	lockdep_assert_held(&hmm->mmu_notifier.mm->mmap_sem);

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid)
			return -EBUSY;

		vma = find_vma(hmm->mmu_notifier.mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM
			 * does not support architectures that allow write
			 * without read.
			 */
			hmm_pfns_clear(range, range->pfns,
				range->start, range->end);
			return -EPERM;
		}

		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.flags = flags;
		hmm_vma_walk.range = range;
		end = min(range->end, vma->vm_end);

		do {
			ret = walk_page_range(vma->vm_mm, start, end,
					&hmm_walk_ops, &hmm_vma_walk);
			start = hmm_vma_walk.last;

			/* Keep trying while the range is valid. */
		} while (ret == -EBUSY && range->valid);

		if (ret) {
			unsigned long i;

			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
			hmm_pfns_clear(range, &range->pfns[i],
				hmm_vma_walk.last, range->end);
			return ret;
		}
		start = end;

	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
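
/*
 * Illustrative sketch (not part of this file) of the usual driver loop
 * around hmm_range_fault(); my_device_lock and
 * my_update_device_page_table() are hypothetical:
 *
 *	again:
 *		ret = hmm_range_register(&range, &m->mirror);
 *		if (ret)
 *			return ret;
 *		hmm_range_wait_until_valid(&range, HMM_RANGE_DEFAULT_TIMEOUT);
 *
 *		down_read(&mm->mmap_sem);
 *		ret = hmm_range_fault(&range, 0);
 *		if (ret < 0) {
 *			up_read(&mm->mmap_sem);
 *			hmm_range_unregister(&range);
 *			if (ret == -EBUSY)
 *				goto again;
 *			return ret;
 *		}
 *		up_read(&mm->mmap_sem);
 *
 *		// Take the same lock the mirror's
 *		// sync_cpu_device_pagetables() callback takes, then re-check
 *		// validity before committing the snapshot to the device.
 *		mutex_lock(&my_device_lock);
 *		if (!range.valid) {
 *			mutex_unlock(&my_device_lock);
 *			hmm_range_unregister(&range);
 *			goto again;
 *		}
 *		my_update_device_page_table(m, &range);
 *		mutex_unlock(&my_device_lock);
 *		hmm_range_unregister(&range);
 */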

/**
 * hmm_range_dma_map - hmm_range_fault() and dma map pages all in one.
 * @range:	range being faulted
 * @device:	device to map pages to
 * @daddrs:	array of dma addresses for the mapped pages
 * @flags:	HMM_FAULT_*
 *
 * Return: the number of pages mapped on success (including zero), or any
 * status return from hmm_range_fault() otherwise.
 */
long hmm_range_dma_map(struct hmm_range *range, struct device *device,
		dma_addr_t *daddrs, unsigned int flags)
{
	unsigned long i, npages, mapped;
	long ret;

	ret = hmm_range_fault(range, flags);
	if (ret <= 0)
		return ret ? ret : -EBUSY;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0, mapped = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		/*
		 * FIXME need to update DMA API to provide invalid DMA address
		 * value instead of a function to test dma address value. This
		 * would remove a lot of dumb code duplicated across many
		 * architectures.
		 *
		 * For now setting it to 0 here is good enough as the pfns[]
		 * value is what is used to check what is valid and what isn't.
		 */
		daddrs[i] = 0;
989391aab11SJérôme Glisse 		page = hmm_device_entry_to_page(range, range->pfns[i]);
99055c0ece8SJérôme Glisse 		if (page == NULL)
99155c0ece8SJérôme Glisse 			continue;
99255c0ece8SJérôme Glisse 
99355c0ece8SJérôme Glisse 		/* Check if range is being invalidated */
99455c0ece8SJérôme Glisse 		if (!range->valid) {
99555c0ece8SJérôme Glisse 			ret = -EBUSY;
99655c0ece8SJérôme Glisse 			goto unmap;
99755c0ece8SJérôme Glisse 		}
99855c0ece8SJérôme Glisse 
99955c0ece8SJérôme Glisse 		/* If it is read and write than map bi-directional. */
100055c0ece8SJérôme Glisse 		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
100155c0ece8SJérôme Glisse 			dir = DMA_BIDIRECTIONAL;
100255c0ece8SJérôme Glisse 
100355c0ece8SJérôme Glisse 		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
100455c0ece8SJérôme Glisse 		if (dma_mapping_error(device, daddrs[i])) {
100555c0ece8SJérôme Glisse 			ret = -EFAULT;
100655c0ece8SJérôme Glisse 			goto unmap;
100755c0ece8SJérôme Glisse 		}
100855c0ece8SJérôme Glisse 
100955c0ece8SJérôme Glisse 		mapped++;
101055c0ece8SJérôme Glisse 	}
101155c0ece8SJérôme Glisse 
101255c0ece8SJérôme Glisse 	return mapped;
101355c0ece8SJérôme Glisse 

unmap:
	for (npages = i, i = 0; (i < npages) && mapped; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		if (dma_mapping_error(device, daddrs[i]))
			continue;

		/* If it is read and write then it was mapped bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		mapped--;
	}

	return ret;
}
EXPORT_SYMBOL(hmm_range_dma_map);

/**
 * hmm_range_dma_unmap() - unmap a range that was mapped with hmm_range_dma_map()
 * @range: range being unmapped
 * @device: device against which dma map was done
 * @daddrs: dma address of mapped pages
 * @dirty: dirty page if it had the write flag set
 * Return: number of pages unmapped on success, -EINVAL otherwise
 *
 * Note that the caller MUST abide by the mmu notifier (or use an HMM mirror
 * and abide by the sync_cpu_device_pagetables() callback) so that it is safe
 * here to call set_page_dirty(). The caller must also take appropriate locks
 * to prevent a concurrent mmu notifier or sync_cpu_device_pagetables() from
 * making progress.
 */
long hmm_range_dma_unmap(struct hmm_range *range,
			 struct device *device,
			 dma_addr_t *daddrs,
			 bool dirty)
{
	unsigned long i, npages;
	long cpages = 0;

	/* Sanity check. */
	if (range->end <= range->start)
		return -EINVAL;
	if (!daddrs)
		return -EINVAL;
	if (!range->pfns)
		return -EINVAL;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* If it is read and write then it was mapped bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
			dir = DMA_BIDIRECTIONAL;

			/*
			 * See comments in function description on why it is
			 * safe here to call set_page_dirty()
			 */
			if (dirty)
				set_page_dirty(page);
		}

		/* Unmap and clear pfns/dma address */
		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		range->pfns[i] = range->values[HMM_PFN_NONE];
		/* FIXME see comments in hmm_range_dma_map() */
		daddrs[i] = 0;
		cpages++;
	}

	return cpages;
}
EXPORT_SYMBOL(hmm_range_dma_unmap);
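
/*
 * Illustrative sketch (not part of this file): pairing hmm_range_dma_map()
 * with hmm_range_dma_unmap(); the daddrs buffer and the out_unregister label
 * are hypothetical:
 *
 *	long mapped;
 *
 *	mapped = hmm_range_dma_map(&range, dev, daddrs, 0);
 *	if (mapped < 0)
 *		goto out_unregister;
 *
 *	// ... program the device using daddrs[] for the mapped pages ...
 *
 *	// Under the same locks used against sync_cpu_device_pagetables(),
 *	// passing dirty=true for pages that were mapped with write access:
 *	hmm_range_dma_unmap(&range, dev, daddrs, true);
 */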