// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

static struct mmu_notifier *hmm_alloc_notifier(struct mm_struct *mm)
{
	struct hmm *hmm;

	hmm = kzalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return ERR_PTR(-ENOMEM);

	init_waitqueue_head(&hmm->wq);
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->ranges_lock);
	hmm->notifiers = 0;
	return &hmm->mmu_notifier;
}

static void hmm_free_notifier(struct mmu_notifier *mn)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);

	WARN_ON(!list_empty(&hmm->ranges));
	WARN_ON(!list_empty(&hmm->mirrors));
	kfree(hmm);
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;

	/*
	 * Since hmm_range_register() holds an mmget() reference,
	 * hmm_release() cannot run as long as any range exists.
	 */
	WARN_ON(!list_empty_careful(&hmm->ranges));

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		/*
		 * Note: The driver is not allowed to trigger
		 * hmm_mirror_unregister() from this thread.
		 */
		if (mirror->ops->release)
			mirror->ops->release(mirror);
	}
	up_read(&hmm->mirrors_sem);
}

static void notifiers_decrement(struct hmm *hmm)
{
	unsigned long flags;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	hmm->notifiers--;
	if (!hmm->notifiers) {
		struct hmm_range *range;

		list_for_each_entry(range, &hmm->ranges, list) {
			if (range->valid)
				continue;
			range->valid = true;
		}
		wake_up_all(&hmm->wq);
	}
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;
	struct hmm_range *range;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	hmm->notifiers++;
	list_for_each_entry(range, &hmm->ranges, list) {
		if (nrange->end < range->start || nrange->start >= range->end)
			continue;

		range->valid = false;
	}
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	if (mmu_notifier_range_blockable(nrange))
		down_read(&hmm->mirrors_sem);
	else if (!down_read_trylock(&hmm->mirrors_sem)) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int rc;

		rc = mirror->ops->sync_cpu_device_pagetables(mirror, nrange);
		if (rc) {
			if (WARN_ON(mmu_notifier_range_blockable(nrange) ||
				    rc != -EAGAIN))
				continue;
			ret = -EAGAIN;
			break;
		}
	}
	up_read(&hmm->mirrors_sem);

out:
	if (ret)
		notifiers_decrement(hmm);
	return ret;
}
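
/*
 * Illustrative sketch (not part of this file): the loop above hands every
 * CPU page table invalidation to each mirror's sync_cpu_device_pagetables()
 * callback. A driver typically tears down the device page table entries
 * covering the span, and may only fail with -EAGAIN when the notifier is
 * non-blockable. The struct my_mirror, its pt_lock, and
 * my_device_tlb_flush() below are invented for the example:
 *
 *	static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *			const struct mmu_notifier_range *nrange)
 *	{
 *		struct my_mirror *mmirror =
 *			container_of(mirror, struct my_mirror, mirror);
 *
 *		if (mmu_notifier_range_blockable(nrange))
 *			mutex_lock(&mmirror->pt_lock);
 *		else if (!mutex_trylock(&mmirror->pt_lock))
 *			return -EAGAIN;
 *
 *		my_device_tlb_flush(mmirror, nrange->start, nrange->end);
 *		mutex_unlock(&mmirror->pt_lock);
 *		return 0;
 *	}
 */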

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);

	notifiers_decrement(hmm);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
	.alloc_notifier		= hmm_alloc_notifier,
	.free_notifier		= hmm_free_notifier,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * The caller cannot unregister the hmm_mirror while any ranges are
 * registered.
 *
 * Callers using this function must put a call to mmu_notifier_synchronize()
 * in their module exit functions.
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	struct mmu_notifier *mn;

	lockdep_assert_held_write(&mm->mmap_sem);

	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mn = mmu_notifier_get_locked(&hmm_mmu_notifier_ops, mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);
	mirror->hmm = container_of(mn, struct hmm, mmu_notifier);

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
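
/*
 * Illustrative sketch (not part of this file): registering a mirror
 * against the current process, plus the matching module teardown required
 * by the comment above. The struct my_mirror and the my_* callbacks are
 * invented; the ops field names match what this file invokes (release and
 * sync_cpu_device_pagetables):
 *
 *	static const struct hmm_mirror_ops my_mirror_ops = {
 *		.release = my_mirror_release,
 *		.sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
 *	};
 *
 *	int my_mirror_setup(struct my_mirror *mmirror)
 *	{
 *		int ret;
 *
 *		mmirror->mirror.ops = &my_mirror_ops;
 *		down_write(&current->mm->mmap_sem);
 *		ret = hmm_mirror_register(&mmirror->mirror, current->mm);
 *		up_write(&current->mm->mmap_sem);
 *		return ret;
 *	}
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		mmu_notifier_synchronize();
 *	}
 */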

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = mirror->hmm;

	down_write(&hmm->mirrors_sem);
	list_del(&mirror->list);
	up_write(&hmm->mirrors_sem);
	mmu_notifier_put(&hmm->mmu_notifier);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	unsigned int		flags;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	if (!vma)
		goto err;

	if (hmm_vma_walk->flags & HMM_FAULT_ALLOW_RETRY)
		flags |= FAULT_FLAG_ALLOW_RETRY;
	if (write_fault)
		flags |= FAULT_FLAG_WRITE;

	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY) {
		/* Note, handle_mm_fault did up_read(&mm->mmap_sem) */
		return -EAGAIN;
	}
	if (ret & VM_FAULT_ERROR)
		goto err;

	return -EBUSY;

err:
	*pfn = range->values[HMM_PFN_ERROR];
	return -EFAULT;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}
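
/*
 * Illustrative sketch (not part of this file): a VM_FAULT_RETRY above means
 * handle_mm_fault() already dropped mmap_sem, so the -EAGAIN that bubbles up
 * to a hmm_range_fault() caller using HMM_FAULT_ALLOW_RETRY leaves the lock
 * released and the caller must retake it before retrying:
 *
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(range, HMM_FAULT_ALLOW_RETRY);
 *	while (ret == -EAGAIN) {
 *		down_read(&mm->mmap_sem);
 *		ret = hmm_range_fault(range, HMM_FAULT_ALLOW_RETRY);
 *	}
 *	up_read(&mm->mmap_sem);
 */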

/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not?
 * @write_fault: write fault?
 * @walk: mm_walk structure
 * Return: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
		return;

	/*
	 * We consider not only the individual per-page request but also the
	 * default flags requested for the whole range. The API can be used
	 * two ways: the first, where the HMM user coalesces multiple page
	 * faults into one request and sets flags per pfn for those faults;
	 * the second, where the HMM user wants to pre-fault a range with
	 * specific flags. For the latter it would be a waste to have the
	 * user pre-fill the pfn array with default flags values.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
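
/*
 * Illustrative sketch (not part of this file): the two ways of driving the
 * default_flags/pfn_flags_mask logic described above. To fault or snapshot
 * only what the per-pfn values in the array ask for:
 *
 *	range->default_flags = 0;
 *	range->pfn_flags_mask = -1UL;
 *
 * To pre-fault a whole range with write permission without pre-filling the
 * pfn array:
 *
 *	range->default_flags = range->flags[HMM_PFN_VALID] |
 *			       range->flags[HMM_PFN_WRITE];
 *	range->pfn_flags_mask = 0;
 */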

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			cpu_flags = pte_to_hmm_pfn_flags(range, pte);
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry: use the device entry, wait
		 * on migration entries, and report anything else as an
		 * error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_device_entry_from_pfn(range,
					    swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(walk->mm, pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	} else {
		cpu_flags = pte_to_hmm_pfn_flags(range, pte);
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
				   &fault, &write_fault);
	}

	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap))
			return -EBUSY;
	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
		*pfn = range->values[HMM_PFN_SPECIAL];
		return -EFAULT;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
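
/*
 * Illustrative sketch (not part of this file): consumers can tell the
 * device private entries encoded above apart from regular pages by testing
 * the flag, then convert back to a page:
 *
 *	if (range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
 *		struct page *page;
 *
 *		page = hmm_device_entry_to_page(range, range->pfns[i]);
 *		// page points at ZONE_DEVICE private memory
 *	}
 */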

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * mmu_notifier callback.
		 *
		 * So just read the pmd value again, check that it is still a
		 * transparent huge or device mapping, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd
	 * that will not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() already unmapped the pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage the get_dev_pagemap() optimization
		 * which will not re-take a reference on a pgmap if we already
		 * have one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start, next;
	pmd_t *pmdp;
	pud_t pud;
	int ret;

again:
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return hmm_vma_walk_hole(start, end, walk);

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud))
			return hmm_vma_walk_hole(start, end, walk);

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault)
			return hmm_vma_walk_hole_(addr, end, fault,
						  write_fault, walk);

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		return 0;
	}

	split_huge_pud(walk->vma, pudp, addr);
	if (pud_none(*pudp))
		goto again;

	pmdp = pmd_offset(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
		if (ret)
			return ret;
	} while (pmdp++, addr = next, addr != end);

	return 0;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 * @range: range
 * @mirror: the mirror against which the range is registered
 *
 * Return: 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table; see include/linux/hmm.h.
 */
int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror)
{
	struct hmm *hmm = mirror->hmm;
	unsigned long flags;

	range->valid = false;
	range->hmm = NULL;

	if ((range->start & (PAGE_SIZE - 1)) || (range->end & (PAGE_SIZE - 1)))
		return -EINVAL;
	if (range->start >= range->end)
		return -EINVAL;

	/* Prevent hmm_release() from running while the range is valid */
	if (!mmget_not_zero(hmm->mmu_notifier.mm))
		return -EFAULT;

	/* Initialize range to track CPU page table updates. */
	spin_lock_irqsave(&hmm->ranges_lock, flags);

	range->hmm = hmm;
	list_add(&range->list, &hmm->ranges);

	/*
	 * If there are any concurrent notifiers we have to wait for them for
	 * the range to be valid (see hmm_range_wait_until_valid()).
	 */
	if (!hmm->notifiers)
		range->valid = true;
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	return 0;
}
EXPORT_SYMBOL(hmm_range_register);
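
/*
 * Illustrative sketch (not part of this file): waiting out the concurrent
 * notifiers mentioned above with hmm_range_wait_until_valid() (declared in
 * include/linux/hmm.h). TIMEOUT_IN_MSEC is a driver-chosen value and
 * mmirror is an invented driver structure:
 *
 *	ret = hmm_range_register(&range, &mmirror->mirror);
 *	if (ret)
 *		return ret;
 *
 *	if (!hmm_range_wait_until_valid(&range, TIMEOUT_IN_MSEC)) {
 *		hmm_range_unregister(&range);
 *		return -EBUSY;
 *	}
 */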

/*
 * hmm_range_unregister() - stop tracking change to CPU page table over a range
 * @range: range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
	struct hmm *hmm = range->hmm;
	unsigned long flags;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	list_del_init(&range->list);
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	/* Drop reference taken by hmm_range_register() */
	mmput(hmm->mmu_notifier.mm);

	/*
	 * The range is now invalid and the ref on the hmm is dropped, so
	 * poison the pointer. Leave other fields in place, for the caller's
	 * use.
	 */
	range->valid = false;
	memset(&range->hmm, POISON_INUSE, sizeof(range->hmm));
}
EXPORT_SYMBOL(hmm_range_unregister);

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range: range being faulted
 * @flags: HMM_FAULT_* flags
 *
 * Return: the number of valid pages in range->pfns[] (from range start
 * address), which may be zero. On error one of the following status codes
 * can be returned:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EAGAIN:	A page fault needs to be retried and mmap_sem was dropped.
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	Invalid (i.e., either no valid vma or it is illegal to access
 *		that range), number of valid pages in range->pfns[] (from
 *		range start address).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, unsigned int flags)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;
	int ret;

	lockdep_assert_held(&hmm->mmu_notifier.mm->mmap_sem);

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid)
			return -EBUSY;

		vma = find_vma(hmm->mmu_notifier.mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM
			 * does not support architectures that allow write
			 * without read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.flags = flags;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		do {
			ret = walk_page_range(start, end, &mm_walk);
			start = hmm_vma_walk.last;

			/* Keep trying while the range is valid. */
		} while (ret == -EBUSY && range->valid);

		if (ret) {
			unsigned long i;

			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
			hmm_pfns_clear(range, &range->pfns[i],
				       hmm_vma_walk.last, range->end);
			return ret;
		}
		start = end;

	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
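
/*
 * Illustrative sketch (not part of this file): the expected caller loop,
 * rechecking range.valid under the same driver lock that
 * sync_cpu_device_pagetables() takes. mmirror->pt_lock,
 * my_device_populate_pagetable() and TIMEOUT_IN_MSEC are invented names:
 *
 *	again:
 *		down_read(&mm->mmap_sem);
 *		ret = hmm_range_fault(&range, 0);
 *		if (ret < 0) {
 *			up_read(&mm->mmap_sem);
 *			if (ret == -EBUSY) {
 *				hmm_range_wait_until_valid(&range,
 *							   TIMEOUT_IN_MSEC);
 *				goto again;
 *			}
 *			return ret;
 *		}
 *
 *		mutex_lock(&mmirror->pt_lock);
 *		if (!range.valid) {
 *			mutex_unlock(&mmirror->pt_lock);
 *			up_read(&mm->mmap_sem);
 *			goto again;
 *		}
 *		my_device_populate_pagetable(mmirror, &range);
 *		mutex_unlock(&mmirror->pt_lock);
 *		up_read(&mm->mmap_sem);
 */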

/**
 * hmm_range_dma_map() - hmm_range_fault() and dma map pages all in one.
 * @range: range being faulted
 * @device: device to map pages to
 * @daddrs: array of dma addresses for the mapped pages
 * @flags: HMM_FAULT_*
 *
 * Return: the number of pages mapped on success (including zero), or any
 * status return from hmm_range_fault() otherwise.
 */
long hmm_range_dma_map(struct hmm_range *range, struct device *device,
		dma_addr_t *daddrs, unsigned int flags)
{
	unsigned long i, npages, mapped;
	long ret;

	ret = hmm_range_fault(range, flags);
	if (ret <= 0)
		return ret ? ret : -EBUSY;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0, mapped = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		/*
		 * FIXME need to update DMA API to provide invalid DMA address
		 * value instead of a function to test dma address value. This
		 * would remove a lot of dumb code duplicated across many
		 * architectures.
		 *
		 * For now setting it to 0 here is good enough as the pfns[]
		 * value is what is used to check what is valid and what isn't.
		 */
		daddrs[i] = 0;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* Check if range is being invalidated */
		if (!range->valid) {
			ret = -EBUSY;
			goto unmap;
		}

		/* If it is read and write then map bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(device, daddrs[i])) {
			ret = -EFAULT;
			goto unmap;
		}

		mapped++;
	}

	return mapped;

unmap:
	for (npages = i, i = 0; (i < npages) && mapped; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		if (dma_mapping_error(device, daddrs[i]))
			continue;

		/* If it is read and write then map bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		mapped--;
	}

	return ret;
}
EXPORT_SYMBOL(hmm_range_dma_map);
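
/*
 * Illustrative sketch (not part of this file): map and unmap are paired on
 * the same range and daddrs array, with dirty set when the device may have
 * written through a bi-directional mapping (dev is an assumed
 * struct device pointer):
 *
 *	ret = hmm_range_dma_map(&range, dev, daddrs, 0);
 *	if (ret < 0)
 *		return ret;
 *	// device works on the mapped entries in daddrs[]
 *	hmm_range_dma_unmap(&range, dev, daddrs, true);
 */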

/**
 * hmm_range_dma_unmap() - unmap a range that was mapped with hmm_range_dma_map()
 * @range: range being unmapped
 * @device: device against which dma map was done
 * @daddrs: dma address of mapped pages
 * @dirty: dirty page if it had the write flag set
 * Return: number of pages unmapped on success, -EINVAL otherwise
 *
 * Note that the caller MUST abide by the mmu notifier (or use an HMM mirror)
 * and its sync_cpu_device_pagetables() callback so that it is safe here to
 * call set_page_dirty(). The caller must also take appropriate locks to
 * prevent concurrent mmu notifier or sync_cpu_device_pagetables() callbacks
 * from making progress.
 */
long hmm_range_dma_unmap(struct hmm_range *range,
			 struct device *device,
			 dma_addr_t *daddrs,
			 bool dirty)
{
	unsigned long i, npages;
	long cpages = 0;

	/* Sanity check. */
	if (range->end <= range->start)
		return -EINVAL;
	if (!daddrs)
		return -EINVAL;
	if (!range->pfns)
		return -EINVAL;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* If it is read and write then it was mapped bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
			dir = DMA_BIDIRECTIONAL;

			/*
			 * See comments in function description on why it is
			 * safe here to call set_page_dirty()
			 */
			if (dirty)
				set_page_dirty(page);
		}

		/* Unmap and clear pfns/dma address */
		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		range->pfns[i] = range->values[HMM_PFN_NONE];
		/* FIXME see comments in hmm_vma_dma_map() */
		daddrs[i] = 0;
		cpages++;
	}

	return cpages;
}
EXPORT_SYMBOL(hmm_range_dma_unmap);