// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

static struct mmu_notifier *hmm_alloc_notifier(struct mm_struct *mm)
{
	struct hmm *hmm;

	hmm = kzalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return ERR_PTR(-ENOMEM);

	init_waitqueue_head(&hmm->wq);
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->ranges_lock);
	hmm->notifiers = 0;
	return &hmm->mmu_notifier;
}

static void hmm_free_notifier(struct mmu_notifier *mn)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);

	WARN_ON(!list_empty(&hmm->ranges));
	WARN_ON(!list_empty(&hmm->mirrors));
	kfree(hmm);
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;

	/*
	 * Since hmm_range_register() holds an mmget() reference,
	 * hmm_release() cannot run as long as any range is registered.
	 */
	WARN_ON(!list_empty_careful(&hmm->ranges));

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		/*
		 * Note: The driver is not allowed to trigger
		 * hmm_mirror_unregister() from this thread.
		 */
		if (mirror->ops->release)
			mirror->ops->release(mirror);
	}
	up_read(&hmm->mirrors_sem);
}

static void notifiers_decrement(struct hmm *hmm)
{
	unsigned long flags;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	hmm->notifiers--;
	if (!hmm->notifiers) {
		struct hmm_range *range;

		list_for_each_entry(range, &hmm->ranges, list) {
			if (range->valid)
				continue;
			range->valid = true;
		}
		wake_up_all(&hmm->wq);
	}
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;
	struct hmm_range *range;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	hmm->notifiers++;
	list_for_each_entry(range, &hmm->ranges, list) {
		if (nrange->end < range->start || nrange->start >= range->end)
			continue;

		range->valid = false;
	}
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	if (mmu_notifier_range_blockable(nrange))
		down_read(&hmm->mirrors_sem);
	else if (!down_read_trylock(&hmm->mirrors_sem)) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int rc;

		rc = mirror->ops->sync_cpu_device_pagetables(mirror, nrange);
		if (rc) {
			if (WARN_ON(mmu_notifier_range_blockable(nrange) ||
				    rc != -EAGAIN))
				continue;
			ret = -EAGAIN;
			break;
		}
	}
	up_read(&hmm->mirrors_sem);

out:
	if (ret)
		notifiers_decrement(hmm);
	return ret;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);

	notifiers_decrement(hmm);
}
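
/*
 * Example: a minimal sync_cpu_device_pagetables() callback of the kind
 * invoked by hmm_invalidate_range_start() above.  This is an illustrative
 * sketch only -- my_device, my_device_lock()/my_device_trylock() and
 * my_device_update_page_table() are hypothetical driver-side names, not
 * part of this file:
 *
 *	static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *			const struct mmu_notifier_range *nrange)
 *	{
 *		struct my_device *mydev =
 *			container_of(mirror, struct my_device, mirror);
 *
 *		// Returning -EAGAIN is only legal for non-blockable
 *		// invalidations (see the WARN_ON() above).
 *		if (mmu_notifier_range_blockable(nrange))
 *			my_device_lock(mydev);
 *		else if (!my_device_trylock(mydev))
 *			return -EAGAIN;
 *		my_device_update_page_table(mydev, nrange->start, nrange->end);
 *		my_device_unlock(mydev);
 *		return 0;
 *	}
 */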

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release = hmm_release,
	.invalidate_range_start = hmm_invalidate_range_start,
	.invalidate_range_end = hmm_invalidate_range_end,
	.alloc_notifier = hmm_alloc_notifier,
	.free_notifier = hmm_free_notifier,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * The caller cannot unregister the hmm_mirror while any ranges are
 * registered.
 *
 * Callers using this function must put a call to mmu_notifier_synchronize()
 * in their module exit functions.
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	struct mmu_notifier *mn;

	lockdep_assert_held_write(&mm->mmap_sem);

	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mn = mmu_notifier_get_locked(&hmm_mmu_notifier_ops, mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);
	mirror->hmm = container_of(mn, struct hmm, mmu_notifier);

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
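
/*
 * Example registration (illustrative sketch; my_mirror_ops and my_release()
 * are hypothetical driver-side names that build on the
 * my_sync_cpu_device_pagetables() sketch above):
 *
 *	static const struct hmm_mirror_ops my_mirror_ops = {
 *		.release = my_release,
 *		.sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
 *	};
 *
 *	mirror->ops = &my_mirror_ops;
 *	down_write(&mm->mmap_sem);	// must hold mmap_sem for write
 *	ret = hmm_mirror_register(mirror, mm);
 *	up_write(&mm->mmap_sem);
 */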

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = mirror->hmm;

	down_write(&hmm->mirrors_sem);
	list_del(&mirror->list);
	up_write(&hmm->mirrors_sem);
	mmu_notifier_put(&hmm->mmu_notifier);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	unsigned int		flags;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	if (hmm_vma_walk->flags & HMM_FAULT_ALLOW_RETRY)
		flags |= FAULT_FLAG_ALLOW_RETRY;
	if (write_fault)
		flags |= FAULT_FLAG_WRITE;

	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY) {
		/* Note, handle_mm_fault did up_read(&mm->mmap_sem) */
		return -EAGAIN;
	}
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EBUSY;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}

/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not?
 * @write_fault: write fault?
 * @walk: mm_walk structure
 * Return: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
		return;

	/*
	 * We consider not only the individual per-page request but also the
	 * default flags requested for the whole range. The API can be used
	 * two ways: in the first, the HMM user coalesces multiple page
	 * faults into one request and sets flags per pfn for those faults;
	 * in the second, the HMM user wants to pre-fault a range with
	 * specific flags. For the latter it would be a waste to have the
	 * user pre-fill the pfn array with a default flags value.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
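
/*
 * Worked example of the default_flags / pfn_flags_mask handling in
 * hmm_pte_need_fault() above (an illustrative sketch following the usage
 * described in Documentation/vm/hmm.rst; the flag bit values are whatever
 * the driver registered in range->flags[]).  To fault a whole range with at
 * least read permission without pre-filling the pfn array:
 *
 *	range->default_flags = range->flags[HMM_PFN_VALID];
 *	range->pfn_flags_mask = 0;
 *
 * To do the same but additionally request write permission for individual
 * pages:
 *
 *	range->default_flags = range->flags[HMM_PFN_VALID];
 *	range->pfn_flags_mask = range->flags[HMM_PFN_WRITE];
 *	range->pfns[i] = range->flags[HMM_PFN_WRITE];	// for page i only
 */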

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			cpu_flags = pte_to_hmm_pfn_flags(range, pte);
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry: ignore migration entries,
		 * handle device-private entries, and report anything else
		 * as an error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_device_entry_from_pfn(range,
					    swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(walk->mm, pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	} else {
		cpu_flags = pte_to_hmm_pfn_flags(range, pte);
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
				   &fault, &write_fault);
	}

	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap))
			return -EBUSY;
	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
		*pfn = range->values[HMM_PFN_SPECIAL];
		return -EFAULT;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * mmu_notifier callback.
		 *
		 * So just read the pmd value again, check that it is still a
		 * transparent huge or device mapping, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e., none, migration,
	 * huge or transparent huge. At this point either it is a valid pmd
	 * entry pointing to a pte directory or it is a bad pmd that will not
	 * recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage the get_dev_pagemap() optimization
		 * which will not re-take a reference on a pgmap if we already
		 * have one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start, next;
	pmd_t *pmdp;
	pud_t pud;
	int ret;

again:
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return hmm_vma_walk_hole(start, end, walk);

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud))
			return hmm_vma_walk_hole(start, end, walk);

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault)
			return hmm_vma_walk_hole_(addr, end, fault,
						  write_fault, walk);

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		return 0;
	}

	split_huge_pud(walk->vma, pudp, addr);
	if (pud_none(*pudp))
		goto again;

	pmdp = pmd_offset(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
		if (ret)
			return ret;
	} while (pmdp++, addr = next, addr != end);

	return 0;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 * @range: range
 * @mirror: the mirror whose mm covers the range of virtual addresses
 *
 * Return: 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table; see include/linux/hmm.h
 */
int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror)
{
	struct hmm *hmm = mirror->hmm;
	unsigned long flags;

	range->valid = false;
	range->hmm = NULL;

	if ((range->start & (PAGE_SIZE - 1)) || (range->end & (PAGE_SIZE - 1)))
		return -EINVAL;
	if (range->start >= range->end)
		return -EINVAL;

	/* Prevent hmm_release() from running while the range is valid */
	if (!mmget_not_zero(hmm->mmu_notifier.mm))
		return -EFAULT;

	/* Initialize range to track CPU page table updates. */
	spin_lock_irqsave(&hmm->ranges_lock, flags);

	range->hmm = hmm;
	list_add(&range->list, &hmm->ranges);

	/*
	 * If there are any concurrent notifiers we have to wait for them for
	 * the range to be valid (see hmm_range_wait_until_valid()).
	 */
	if (!hmm->notifiers)
		range->valid = true;
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	return 0;
}
EXPORT_SYMBOL(hmm_range_register);
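
/*
 * Example range setup before calling hmm_range_register() (an illustrative
 * sketch following the usage pattern in Documentation/vm/hmm.rst; the pfns
 * array and the flags/values tables are driver-provided):
 *
 *	range.start = ...;
 *	range.end = ...;
 *	range.pfns = ...;
 *	range.flags = ...;
 *	range.values = ...;
 *	range.pfn_shift = ...;
 *	hmm_range_register(&range, mirror);
 */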
824a3e0d41cSJérôme Glisse */ 825a3e0d41cSJérôme Glisse void hmm_range_unregister(struct hmm_range *range) 826a3e0d41cSJérôme Glisse { 827085ea250SRalph Campbell struct hmm *hmm = range->hmm; 8285a136b4aSJason Gunthorpe unsigned long flags; 829a3e0d41cSJérôme Glisse 8305a136b4aSJason Gunthorpe spin_lock_irqsave(&hmm->ranges_lock, flags); 83147f24598SJason Gunthorpe list_del_init(&range->list); 8325a136b4aSJason Gunthorpe spin_unlock_irqrestore(&hmm->ranges_lock, flags); 833a3e0d41cSJérôme Glisse 834a3e0d41cSJérôme Glisse /* Drop reference taken by hmm_range_register() */ 835c7d8b782SJason Gunthorpe mmput(hmm->mmu_notifier.mm); 8362dcc3eb8SJason Gunthorpe 8372dcc3eb8SJason Gunthorpe /* 8382dcc3eb8SJason Gunthorpe * The range is now invalid and the ref on the hmm is dropped, so 8392dcc3eb8SJason Gunthorpe * poison the pointer. Leave other fields in place, for the caller's 8402dcc3eb8SJason Gunthorpe * use. 8412dcc3eb8SJason Gunthorpe */ 842a3e0d41cSJérôme Glisse range->valid = false; 8432dcc3eb8SJason Gunthorpe memset(&range->hmm, POISON_INUSE, sizeof(range->hmm)); 844a3e0d41cSJérôme Glisse } 845a3e0d41cSJérôme Glisse EXPORT_SYMBOL(hmm_range_unregister); 846a3e0d41cSJérôme Glisse 8479a4903e4SChristoph Hellwig /** 8489a4903e4SChristoph Hellwig * hmm_range_fault - try to fault some address in a virtual address range 84908232a45SJérôme Glisse * @range: range being faulted 8509a4903e4SChristoph Hellwig * @flags: HMM_FAULT_* flags 85173231612SJérôme Glisse * 8529a4903e4SChristoph Hellwig * Return: the number of valid pages in range->pfns[] (from range start 8539a4903e4SChristoph Hellwig * address), which may be zero. On error one of the following status codes 8549a4903e4SChristoph Hellwig * can be returned: 8559a4903e4SChristoph Hellwig * 8569a4903e4SChristoph Hellwig * -EINVAL: Invalid arguments or mm or virtual address is in an invalid vma 8579a4903e4SChristoph Hellwig * (e.g., device file vma). 85873231612SJérôme Glisse * -ENOMEM: Out of memory. 8599a4903e4SChristoph Hellwig * -EPERM: Invalid permission (e.g., asking for write and range is read 8609a4903e4SChristoph Hellwig * only). 8619a4903e4SChristoph Hellwig * -EAGAIN: A page fault needs to be retried and mmap_sem was dropped. 8629a4903e4SChristoph Hellwig * -EBUSY: The range has been invalidated and the caller needs to wait for 8639a4903e4SChristoph Hellwig * the invalidation to finish. 8649a4903e4SChristoph Hellwig * -EFAULT: Invalid (i.e., either no valid vma or it is illegal to access 8659a4903e4SChristoph Hellwig * that range) number of valid pages in range->pfns[] (from 86673231612SJérôme Glisse * range start address). 86774eee180SJérôme Glisse * 86874eee180SJérôme Glisse * This is similar to a regular CPU page fault except that it will not trigger 86973231612SJérôme Glisse * any memory migration if the memory being faulted is not accessible by CPUs 87073231612SJérôme Glisse * and caller does not ask for migration. 87174eee180SJérôme Glisse * 872ff05c0c6SJérôme Glisse * On error, for one virtual address in the range, the function will mark the 873ff05c0c6SJérôme Glisse * corresponding HMM pfn entry with an error flag. 
 */
long hmm_range_fault(struct hmm_range *range, unsigned int flags)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;
	int ret;

	lockdep_assert_held(&hmm->mmu_notifier.mm->mmap_sem);

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid)
			return -EBUSY;

		vma = find_vma(hmm->mmu_notifier.mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM
			 * does not support architectures that allow write
			 * without read.
			 */
			hmm_pfns_clear(range, range->pfns,
				range->start, range->end);
			return -EPERM;
		}

		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.flags = flags;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		do {
			ret = walk_page_range(start, end, &mm_walk);
			start = hmm_vma_walk.last;

			/* Keep trying while the range is valid. */
		} while (ret == -EBUSY && range->valid);

		if (ret) {
			unsigned long i;

			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
			hmm_pfns_clear(range, &range->pfns[i],
				       hmm_vma_walk.last, range->end);
			return ret;
		}
		start = end;

	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
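
/*
 * Example caller loop for hmm_range_fault(), condensed from the usage
 * pattern in Documentation/vm/hmm.rst.  This is an illustrative sketch only;
 * driver_lock()/driver_unlock() stand in for whatever lock the driver also
 * takes in its sync_cpu_device_pagetables() callback:
 *
 *	hmm_range_register(&range, mirror);
 *	hmm_range_wait_until_valid(&range, HMM_RANGE_DEFAULT_TIMEOUT);
 *
 *  again:
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(&range, 0);
 *	if (ret < 0) {
 *		up_read(&mm->mmap_sem);
 *		if (ret == -EBUSY) {
 *			hmm_range_wait_until_valid(&range,
 *						   HMM_RANGE_DEFAULT_TIMEOUT);
 *			goto again;
 *		}
 *		hmm_range_unregister(&range);
 *		return ret;
 *	}
 *	driver_lock();
 *	if (!range.valid) {
 *		driver_unlock();
 *		up_read(&mm->mmap_sem);
 *		goto again;
 *	}
 *	// use range.pfns[] to update the device page table
 *	hmm_range_unregister(&range);
 *	driver_unlock();
 *	up_read(&mm->mmap_sem);
 */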

/**
 * hmm_range_dma_map - hmm_range_fault() and dma map page all in one.
 * @range: range being faulted
 * @device: device to map page to
 * @daddrs: array of dma addresses for the mapped pages
 * @flags: HMM_FAULT_*
 *
 * Return: the number of pages mapped on success (including zero), or any
 * status return from hmm_range_fault() otherwise.
 */
long hmm_range_dma_map(struct hmm_range *range, struct device *device,
		dma_addr_t *daddrs, unsigned int flags)
{
	unsigned long i, npages, mapped;
	long ret;

	ret = hmm_range_fault(range, flags);
	if (ret <= 0)
		return ret ? ret : -EBUSY;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0, mapped = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		/*
		 * FIXME need to update DMA API to provide invalid DMA address
		 * value instead of a function to test dma address value. This
		 * would remove a lot of duplicated code across many
		 * architectures.
		 *
		 * For now setting it to 0 here is good enough as the pfns[]
		 * value is what is used to check what is valid and what isn't.
		 */
		daddrs[i] = 0;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* Check if range is being invalidated */
		if (!range->valid) {
			ret = -EBUSY;
			goto unmap;
		}

		/* If it is read and write then map bi-directionally. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(device, daddrs[i])) {
			ret = -EFAULT;
			goto unmap;
		}

		mapped++;
	}

	return mapped;

unmap:
	for (npages = i, i = 0; (i < npages) && mapped; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		if (dma_mapping_error(device, daddrs[i]))
			continue;

		/* If it is read and write then it was mapped bi-directionally. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		mapped--;
	}

	return ret;
}
EXPORT_SYMBOL(hmm_range_dma_map);
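
/*
 * Example pairing of hmm_range_dma_map() above with hmm_range_dma_unmap()
 * below (an illustrative sketch only; the daddrs allocation and the locking
 * described in both function comments are the caller's responsibility):
 *
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_dma_map(&range, dev, daddrs, 0);
 *	up_read(&mm->mmap_sem);
 *	if (ret < 0)
 *		return ret;
 *
 *	// ... device uses the dma addresses in daddrs[] ...
 *
 *	ret = hmm_range_dma_unmap(&range, dev, daddrs, true);
 */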

/**
 * hmm_range_dma_unmap() - unmap a range that was mapped with hmm_range_dma_map()
 * @range: range being unmapped
 * @device: device against which dma map was done
 * @daddrs: dma address of mapped pages
 * @dirty: dirty page if it had the write flag set
 * Return: number of pages unmapped on success, -EINVAL otherwise
 *
 * Note that the caller MUST abide by the mmu notifier or use an HMM mirror
 * and abide by the sync_cpu_device_pagetables() callback so that it is safe
 * here to call set_page_dirty(). The caller must also take appropriate locks
 * to prevent concurrent mmu notifier or sync_cpu_device_pagetables() calls
 * from making progress.
 */
long hmm_range_dma_unmap(struct hmm_range *range,
			 struct device *device,
			 dma_addr_t *daddrs,
			 bool dirty)
{
	unsigned long i, npages;
	long cpages = 0;

	/* Sanity check. */
	if (range->end <= range->start)
		return -EINVAL;
	if (!daddrs)
		return -EINVAL;
	if (!range->pfns)
		return -EINVAL;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* If it is read and write then it was mapped bi-directionally. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
			dir = DMA_BIDIRECTIONAL;

			/*
			 * See comments in function description on why it is
			 * safe here to call set_page_dirty()
			 */
			if (dirty)
				set_page_dirty(page);
		}

		/* Unmap and clear pfns/dma address */
		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		range->pfns[i] = range->values[HMM_PFN_NONE];
		/* FIXME see comments in hmm_range_dma_map() */
		daddrs[i] = 0;
		cpages++;
	}

	return cpages;
}
EXPORT_SYMBOL(hmm_range_dma_unmap);