// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/**
 * hmm_get_or_create - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 * Return: an HMM object, either by referencing the existing
 *          (per-process) object, or by creating a new one.
 *
 * This is not intended to be used directly by device drivers. If mm already
 * has an HMM struct then it gets a reference on it and returns it. Otherwise
 * it allocates an HMM struct, initializes it, associates it with the mm and
 * returns it.
 */
static struct hmm *hmm_get_or_create(struct mm_struct *mm)
{
	struct hmm *hmm;

	lockdep_assert_held_write(&mm->mmap_sem);

	/* Abuse the page_table_lock to also protect mm->hmm. */
	spin_lock(&mm->page_table_lock);
	hmm = mm->hmm;
	if (mm->hmm && kref_get_unless_zero(&mm->hmm->kref))
		goto out_unlock;
	spin_unlock(&mm->page_table_lock);

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	init_waitqueue_head(&hmm->wq);
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->ranges_lock);
	kref_init(&hmm->kref);
	hmm->notifiers = 0;
	hmm->mm = mm;

	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
		kfree(hmm);
		return NULL;
	}

	mmgrab(hmm->mm);

	/*
	 * We hold the exclusive mmap_sem here so we know that mm->hmm is
	 * still NULL or 0 kref, and is safe to update.
	 */
	spin_lock(&mm->page_table_lock);
	mm->hmm = hmm;

out_unlock:
	spin_unlock(&mm->page_table_lock);
	return hmm;
}

static void hmm_free_rcu(struct rcu_head *rcu)
{
	struct hmm *hmm = container_of(rcu, struct hmm, rcu);

	mmdrop(hmm->mm);
	kfree(hmm);
}

static void hmm_free(struct kref *kref)
{
	struct hmm *hmm = container_of(kref, struct hmm, kref);

	spin_lock(&hmm->mm->page_table_lock);
	if (hmm->mm->hmm == hmm)
		hmm->mm->hmm = NULL;
	spin_unlock(&hmm->mm->page_table_lock);

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, hmm->mm);
	mmu_notifier_call_srcu(&hmm->rcu, hmm_free_rcu);
}

static inline void hmm_put(struct hmm *hmm)
{
	kref_put(&hmm->kref, hmm_free);
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;

	/* Bail out if hmm is in the process of being freed */
	if (!kref_get_unless_zero(&hmm->kref))
		return;

	/*
	 * Since hmm_range_register() holds the mmget() lock hmm_release() is
	 * prevented as long as a range exists.
	 */
	WARN_ON(!list_empty_careful(&hmm->ranges));

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		/*
		 * Note: The driver is not allowed to trigger
		 * hmm_mirror_unregister() from this thread.
		 */
		if (mirror->ops->release)
			mirror->ops->release(mirror);
	}
	up_read(&hmm->mirrors_sem);

	hmm_put(hmm);
}
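
/*
 * Example (illustrative only): because hmm_mirror_unregister() must not be
 * called from the ->release() callback above, a driver typically only
 * revokes device access here and defers the actual unregister to another
 * context. The struct my_mirror container and my_device_revoke_access()
 * helper are hypothetical driver code.
 *
 *	static void my_mirror_release(struct hmm_mirror *mirror)
 *	{
 *		struct my_mirror *mym;
 *
 *		mym = container_of(mirror, struct my_mirror, mirror);
 *		my_device_revoke_access(mym->dev);
 *	}
 */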

static void notifiers_decrement(struct hmm *hmm)
{
	unsigned long flags;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	hmm->notifiers--;
	if (!hmm->notifiers) {
		struct hmm_range *range;

		list_for_each_entry(range, &hmm->ranges, list) {
			if (range->valid)
				continue;
			range->valid = true;
		}
		wake_up_all(&hmm->wq);
	}
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;
	struct hmm_range *range;
	unsigned long flags;
	int ret = 0;

	if (!kref_get_unless_zero(&hmm->kref))
		return 0;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	hmm->notifiers++;
	list_for_each_entry(range, &hmm->ranges, list) {
		if (nrange->end < range->start || nrange->start >= range->end)
			continue;

		range->valid = false;
	}
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	if (mmu_notifier_range_blockable(nrange))
		down_read(&hmm->mirrors_sem);
	else if (!down_read_trylock(&hmm->mirrors_sem)) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int rc;

		rc = mirror->ops->sync_cpu_device_pagetables(mirror, nrange);
		if (rc) {
			if (WARN_ON(mmu_notifier_range_blockable(nrange) ||
				    rc != -EAGAIN))
				continue;
			ret = -EAGAIN;
			break;
		}
	}
	up_read(&hmm->mirrors_sem);

out:
	if (ret)
		notifiers_decrement(hmm);
	hmm_put(hmm);
	return ret;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);

	if (!kref_get_unless_zero(&hmm->kref))
		return;

	notifiers_decrement(hmm);
	hmm_put(hmm);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};
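
/*
 * Example (illustrative only): a minimal sync_cpu_device_pagetables()
 * callback as invoked by hmm_invalidate_range_start() above. Returning
 * -EAGAIN is only legitimate for non-blockable invalidations, so a driver
 * that cannot invalidate without sleeping bails out early in that case.
 * my_device_invalidate_range() is hypothetical driver code.
 *
 *	static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *			const struct mmu_notifier_range *nrange)
 *	{
 *		struct my_mirror *mym;
 *
 *		mym = container_of(mirror, struct my_mirror, mirror);
 *		if (!mmu_notifier_range_blockable(nrange))
 *			return -EAGAIN;
 *		my_device_invalidate_range(mym->dev, nrange->start,
 *					   nrange->end);
 *		return 0;
 *	}
 */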

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	lockdep_assert_held_write(&mm->mmap_sem);

	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mirror->hmm = hmm_get_or_create(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
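
/*
 * Example (illustrative only): registering the mirror with the exclusive
 * mmap_sem held, as required by the lockdep assertion above. struct
 * my_mirror and my_mirror_ops are hypothetical driver code.
 *
 *	static int my_driver_mirror_mm(struct my_mirror *mym,
 *				       struct mm_struct *mm)
 *	{
 *		int ret;
 *
 *		mym->mirror.ops = &my_mirror_ops;
 *		down_write(&mm->mmap_sem);
 *		ret = hmm_mirror_register(&mym->mirror, mm);
 *		up_write(&mm->mmap_sem);
 *		return ret;
 *	}
 */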

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = mirror->hmm;

	down_write(&hmm->mirrors_sem);
	list_del(&mirror->list);
	up_write(&hmm->mirrors_sem);
	hmm_put(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	unsigned int		flags;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	if (hmm_vma_walk->flags & HMM_FAULT_ALLOW_RETRY)
		flags |= FAULT_FLAG_ALLOW_RETRY;
	if (write_fault)
		flags |= FAULT_FLAG_WRITE;

	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY) {
		/* Note, handle_mm_fault did up_read(&mm->mmap_sem) */
		return -EAGAIN;
	}
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EBUSY;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}

/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Return: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i, page_size;

	hmm_vma_walk->last = addr;
	page_size = hmm_range_page_size(range);
	i = (addr - range->start) >> range->page_shift;

	for (; addr < end; addr += page_size, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
		return;

	/*
	 * We not only consider the individual per-page request, we also
	 * consider the default flags requested for the range. The API can
	 * be used two ways. The first one where the HMM user coalesces
	 * multiple page faults into one request and sets flags per pfn for
	 * those faults. The second one where the HMM user wants to pre-
	 * fault a range with specific flags. For the latter one it is a
	 * waste to have the user pre-fill the pfn arrays with a default
	 * flags value.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
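
/*
 * Example (illustrative only): the second usage mode described in
 * hmm_pte_need_fault() above, pre-faulting a whole range for read and
 * write without pre-filling the pfns[] array. Setting pfn_flags_mask to
 * zero makes the per-pfn request flags irrelevant, so only these two
 * fields need to be set before calling hmm_range_fault():
 *
 *	range->default_flags = range->flags[HMM_PFN_VALID] |
 *			       range->flags[HMM_PFN_WRITE];
 *	range->pfn_flags_mask = 0;
 */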

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
#else
	/* If THP is not enabled then we should never reach this code! */
	return -EINVAL;
#endif
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry; wait on migration, handle
		 * device-private entries, and report anything else as error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_device_entry_from_pfn(range,
					    swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(walk->mm, pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	} else {
		cpu_flags = pte_to_hmm_pfn_flags(range, pte);
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
				   &fault, &write_fault);
	}

	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap))
			return -EBUSY;
	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
		*pfn = range->values[HMM_PFN_SPECIAL];
		return -EFAULT;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage get_dev_pagemap() optimization which
		 * will not re-take a reference on a pgmap if we already have
		 * one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

static int hmm_vma_walk_pud(pud_t *pudp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start, next;
	pmd_t *pmdp;
	pud_t pud;
	int ret;

again:
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return hmm_vma_walk_hole(start, end, walk);

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud))
			return hmm_vma_walk_hole(start, end, walk);

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault)
			return hmm_vma_walk_hole_(addr, end, fault,
						  write_fault, walk);

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		return 0;
	}

	split_huge_pud(walk->vma, pudp, addr);
	if (pud_none(*pudp))
		goto again;

	pmdp = pmd_offset(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
		if (ret)
			return ret;
	} while (pmdp++, addr = next, addr != end);

	return 0;
}

static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long addr = start, i, pfn, mask, size, pfn_inc;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	size = huge_page_size(h);
	mask = size - 1;
	if (range->page_shift != PAGE_SHIFT) {
		/* Make sure we are looking at a full page. */
		if (start & mask)
			return -EINVAL;
		if (end < (start + size))
			return -EINVAL;
		pfn_inc = size >> PAGE_SHIFT;
	} else {
		pfn_inc = 1;
		size = PAGE_SIZE;
	}

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> range->page_shift;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
	for (; addr < end; addr += size, i++, pfn += pfn_inc)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
#else /* CONFIG_HUGETLB_PAGE */
	return -EINVAL;
#endif
}

static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 * @range: range
 * @mirror: the mirror (and thus the mm) against which the range is registered
 * @start: start virtual address (inclusive)
 * @end: end virtual address (exclusive)
 * @page_shift: expected page shift for the range
 * Return: 0 on success, -EINVAL on a misaligned or empty range, -EFAULT if
 * the address space is no longer valid
 *
 * Track updates to the CPU page table; see include/linux/hmm.h.
 */
int hmm_range_register(struct hmm_range *range,
		       struct hmm_mirror *mirror,
		       unsigned long start,
		       unsigned long end,
		       unsigned page_shift)
{
	unsigned long mask = ((1UL << page_shift) - 1UL);
	struct hmm *hmm = mirror->hmm;
	unsigned long flags;

	range->valid = false;
	range->hmm = NULL;

	if ((start & mask) || (end & mask))
		return -EINVAL;
	if (start >= end)
		return -EINVAL;

	range->page_shift = page_shift;
	range->start = start;
	range->end = end;

	/* Prevent hmm_release() from running while the range is valid */
	if (!mmget_not_zero(hmm->mm))
		return -EFAULT;

	/* Initialize range to track CPU page table updates. */
	spin_lock_irqsave(&hmm->ranges_lock, flags);

	range->hmm = hmm;
	kref_get(&hmm->kref);
	list_add(&range->list, &hmm->ranges);

	/*
	 * If there are any concurrent notifiers we have to wait for them for
	 * the range to be valid (see hmm_range_wait_until_valid()).
	 */
	if (!hmm->notifiers)
		range->valid = true;
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	return 0;
}
EXPORT_SYMBOL(hmm_range_register);

/*
 * hmm_range_unregister() - stop tracking change to CPU page table over a range
 * @range: range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
	struct hmm *hmm = range->hmm;
	unsigned long flags;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	list_del_init(&range->list);
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	/* Drop reference taken by hmm_range_register() */
	mmput(hmm->mm);
	hmm_put(hmm);

	/*
	 * The range is now invalid and the ref on the hmm is dropped, so
	 * poison the pointer. Leave other fields in place, for the caller's
	 * use.
	 */
	range->valid = false;
	memset(&range->hmm, POISON_INUSE, sizeof(range->hmm));
}
EXPORT_SYMBOL(hmm_range_unregister);
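
/*
 * Example (illustrative only): the expected calling pattern around
 * hmm_range_register() / hmm_range_fault() / hmm_range_unregister(),
 * condensed from the documentation in include/linux/hmm.h. Error handling
 * is reduced to a minimum; my_device_update_ptes(), TIMEOUT_MSEC and
 * my_lock (the driver lock also taken in the invalidation callback) are
 * hypothetical driver code.
 *
 *	ret = hmm_range_register(&range, mirror, start, end, PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *	for (;;) {
 *		hmm_range_wait_until_valid(&range, TIMEOUT_MSEC);
 *		down_read(&mm->mmap_sem);
 *		ret = hmm_range_fault(&range, 0);
 *		up_read(&mm->mmap_sem);
 *		if (ret < 0) {
 *			if (ret == -EBUSY)
 *				continue;
 *			break;
 *		}
 *		mutex_lock(&my_lock);
 *		if (range.valid) {
 *			my_device_update_ptes(&range);
 *			mutex_unlock(&my_lock);
 *			break;
 *		}
 *		mutex_unlock(&my_lock);
 *	}
 *	hmm_range_unregister(&range);
 */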

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range: range being faulted
 * @flags: HMM_FAULT_* flags
 *
 * Return: the number of valid pages in range->pfns[] (from range start
 * address), which may be zero. On error one of the following status codes
 * can be returned:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid
 *		vma (e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EAGAIN:	A page fault needs to be retried and mmap_sem was dropped.
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	Invalid range (i.e., either no valid vma covering it or it is
 *		illegal to access it).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, unsigned int flags)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;
	int ret;

	lockdep_assert_held(&hmm->mm->mmap_sem);

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid)
			return -EBUSY;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
			    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM
			 * does not support architectures that allow write
			 * without read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.flags = flags;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		do {
			ret = walk_page_range(start, end, &mm_walk);
			start = hmm_vma_walk.last;

			/* Keep trying while the range is valid. */
		} while (ret == -EBUSY && range->valid);

		if (ret) {
			unsigned long i;

			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
			hmm_pfns_clear(range, &range->pfns[i],
				       hmm_vma_walk.last, range->end);
			return ret;
		}
		start = end;

	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
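
/*
 * Example (illustrative only): decoding one entry of range->pfns[] after a
 * successful hmm_range_fault(), while the range is still valid and the
 * driver lock is held. hmm_device_entry_to_page() is the real helper used
 * below; my_device_map_page() is hypothetical driver code.
 *
 *	struct page *page;
 *
 *	page = hmm_device_entry_to_page(range, range->pfns[i]);
 *	if (page)
 *		my_device_map_page(dev, addr, page,
 *				   range->pfns[i] &
 *				   range->flags[HMM_PFN_WRITE]);
 */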

/**
 * hmm_range_dma_map - hmm_range_fault() and dma map page all in one.
 * @range: range being faulted
 * @device: device to map page to
 * @daddrs: array of dma addresses for the mapped pages
 * @flags: HMM_FAULT_*
 *
 * Return: the number of pages mapped on success (including zero), or any
 * status return from hmm_range_fault() otherwise.
 */
long hmm_range_dma_map(struct hmm_range *range, struct device *device,
		dma_addr_t *daddrs, unsigned int flags)
{
	unsigned long i, npages, mapped;
	long ret;

	ret = hmm_range_fault(range, flags);
	if (ret <= 0)
		return ret ? ret : -EBUSY;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0, mapped = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		/*
		 * FIXME need to update DMA API to provide invalid DMA address
		 * value instead of a function to test dma address value. This
		 * would remove a lot of dumb code duplicated across many
		 * architectures.
		 *
		 * For now setting it to 0 here is good enough as the pfns[]
		 * value is what is used to check what is valid and what isn't.
		 */
		daddrs[i] = 0;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* Check if range is being invalidated */
		if (!range->valid) {
			ret = -EBUSY;
			goto unmap;
		}

		/* If it is read and write then map it bi-directionally. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(device, daddrs[i])) {
			ret = -EFAULT;
			goto unmap;
		}

		mapped++;
	}

	return mapped;

unmap:
	for (npages = i, i = 0; (i < npages) && mapped; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		if (dma_mapping_error(device, daddrs[i]))
			continue;

		/* If it is read and write then it was mapped bi-directionally. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		mapped--;
	}

	return ret;
}
EXPORT_SYMBOL(hmm_range_dma_map);
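
/*
 * Example (illustrative only): hmm_range_dma_map() pairs with
 * hmm_range_dma_unmap() below, reusing the same daddrs array under the
 * same locking rules as hmm_range_fault(); the vma argument is optional
 * so NULL is passed here.
 *
 *	mapped = hmm_range_dma_map(&range, dev, daddrs, 0);
 *	if (mapped >= 0) {
 *		// ... program the device with daddrs[] ...
 *		hmm_range_dma_unmap(&range, NULL, dev, daddrs, true);
 *	}
 */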

/**
 * hmm_range_dma_unmap() - unmap a range that was mapped with
 * hmm_range_dma_map()
 * @range: range being unmapped
 * @vma: the vma against which the range was mapped (optional)
 * @device: device against which the dma map was done
 * @daddrs: dma addresses of the mapped pages
 * @dirty: set pages dirty if they were mapped with the write flag set
 * Return: number of pages unmapped on success, -EINVAL otherwise
 *
 * Note that the caller MUST abide by the mmu notifier or use an HMM mirror
 * and abide by the sync_cpu_device_pagetables() callback so that it is safe
 * here to call set_page_dirty(). The caller must also take appropriate locks
 * to prevent a concurrent mmu notifier or sync_cpu_device_pagetables() from
 * making progress.
 */
long hmm_range_dma_unmap(struct hmm_range *range,
			 struct vm_area_struct *vma,
			 struct device *device,
			 dma_addr_t *daddrs,
			 bool dirty)
{
	unsigned long i, npages;
	long cpages = 0;

	/* Sanity check. */
	if (range->end <= range->start)
		return -EINVAL;
	if (!daddrs)
		return -EINVAL;
	if (!range->pfns)
		return -EINVAL;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* If it is read and write then it was mapped bi-directionally. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
			dir = DMA_BIDIRECTIONAL;

			/*
			 * See comments in function description on why it is
			 * safe here to call set_page_dirty()
			 */
			if (dirty)
				set_page_dirty(page);
		}

		/* Unmap and clear pfns/dma address */
		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		range->pfns[i] = range->values[HMM_PFN_NONE];
		/* FIXME see comments in hmm_range_dma_map() */
		daddrs[i] = 0;
		cpages++;
	}

	return cpages;
}
EXPORT_SYMBOL(hmm_range_dma_unmap);