// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/**
 * hmm_get_or_create - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 * Returns: an HMM object, either by referencing the existing
 *          (per-process) object, or by creating a new one.
 *
 * This is not intended to be used directly by device drivers. If mm already
 * has an HMM struct then it gets a reference on it and returns it. Otherwise
 * it allocates an HMM struct, initializes it, associates it with the mm, and
 * returns it.
 */
static struct hmm *hmm_get_or_create(struct mm_struct *mm)
{
	struct hmm *hmm;

	lockdep_assert_held_exclusive(&mm->mmap_sem);

	/* Abuse the page_table_lock to also protect mm->hmm. */
	spin_lock(&mm->page_table_lock);
	hmm = mm->hmm;
	if (mm->hmm && kref_get_unless_zero(&mm->hmm->kref))
		goto out_unlock;
	spin_unlock(&mm->page_table_lock);

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	init_waitqueue_head(&hmm->wq);
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	mutex_init(&hmm->lock);
	kref_init(&hmm->kref);
	hmm->notifiers = 0;
	hmm->mm = mm;

	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
		kfree(hmm);
		return NULL;
	}

	mmgrab(hmm->mm);

	/*
	 * We hold the exclusive mmap_sem here so we know that mm->hmm is
	 * still NULL or 0 kref, and is safe to update.
	 */
	spin_lock(&mm->page_table_lock);
	mm->hmm = hmm;

out_unlock:
	spin_unlock(&mm->page_table_lock);
	return hmm;
}

static void hmm_free_rcu(struct rcu_head *rcu)
{
	struct hmm *hmm = container_of(rcu, struct hmm, rcu);

	mmdrop(hmm->mm);
	kfree(hmm);
}

static void hmm_free(struct kref *kref)
{
	struct hmm *hmm = container_of(kref, struct hmm, kref);

	spin_lock(&hmm->mm->page_table_lock);
	if (hmm->mm->hmm == hmm)
		hmm->mm->hmm = NULL;
	spin_unlock(&hmm->mm->page_table_lock);

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, hmm->mm);
	mmu_notifier_call_srcu(&hmm->rcu, hmm_free_rcu);
}

static inline void hmm_put(struct hmm *hmm)
{
	kref_put(&hmm->kref, hmm_free);
}
static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;

	/* Bail out if hmm is in the process of being freed */
	if (!kref_get_unless_zero(&hmm->kref))
		return;

	/*
	 * Since hmm_range_register() holds the mmget() lock hmm_release() is
	 * prevented as long as a range exists.
	 */
	WARN_ON(!list_empty_careful(&hmm->ranges));

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		/*
		 * Note: The driver is not allowed to trigger
		 * hmm_mirror_unregister() from this thread.
		 */
		if (mirror->ops->release)
			mirror->ops->release(mirror);
	}
	up_read(&hmm->mirrors_sem);

	hmm_put(hmm);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;
	struct hmm_update update;
	struct hmm_range *range;
	int ret = 0;

	if (!kref_get_unless_zero(&hmm->kref))
		return 0;

	update.start = nrange->start;
	update.end = nrange->end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = mmu_notifier_range_blockable(nrange);

	if (mmu_notifier_range_blockable(nrange))
		mutex_lock(&hmm->lock);
	else if (!mutex_trylock(&hmm->lock)) {
		ret = -EAGAIN;
		goto out;
	}
	hmm->notifiers++;
	list_for_each_entry(range, &hmm->ranges, list) {
		if (update.end < range->start || update.start >= range->end)
			continue;

		range->valid = false;
	}
	mutex_unlock(&hmm->lock);

	if (mmu_notifier_range_blockable(nrange))
		down_read(&hmm->mirrors_sem);
	else if (!down_read_trylock(&hmm->mirrors_sem)) {
		ret = -EAGAIN;
		goto out;
	}
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int ret;

		ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
		if (!update.blockable && ret == -EAGAIN)
			break;
	}
	up_read(&hmm->mirrors_sem);

out:
	hmm_put(hmm);
	return ret;
}
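/*
 * An illustrative sketch of the mirror callback invoked above (not part of
 * this file): the driver is expected to invalidate its device page tables
 * for update->start..update->end, and must not sleep when update->blockable
 * is false. The my_mirror, my_lock(), my_trylock(), my_unlock() and
 * my_invalidate() names are hypothetical.
 *
 *	static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *					const struct hmm_update *update)
 *	{
 *		struct my_mirror *m;
 *
 *		m = container_of(mirror, struct my_mirror, mirror);
 *		if (update->blockable)
 *			my_lock(m);
 *		else if (!my_trylock(m))
 *			return -EAGAIN;
 *		my_invalidate(m, update->start, update->end);
 *		my_unlock(m);
 *		return 0;
 *	}
 */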
Glisse 199c0b12405SJérôme Glisse static void hmm_invalidate_range_end(struct mmu_notifier *mn, 200a3e0d41cSJérôme Glisse const struct mmu_notifier_range *nrange) 201c0b12405SJérôme Glisse { 2026d7c3cdeSJason Gunthorpe struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier); 203c0b12405SJérôme Glisse 2046d7c3cdeSJason Gunthorpe if (!kref_get_unless_zero(&hmm->kref)) 2056d7c3cdeSJason Gunthorpe return; 206c0b12405SJérôme Glisse 207a3e0d41cSJérôme Glisse mutex_lock(&hmm->lock); 208a3e0d41cSJérôme Glisse hmm->notifiers--; 209a3e0d41cSJérôme Glisse if (!hmm->notifiers) { 210a3e0d41cSJérôme Glisse struct hmm_range *range; 211a3e0d41cSJérôme Glisse 212a3e0d41cSJérôme Glisse list_for_each_entry(range, &hmm->ranges, list) { 213a3e0d41cSJérôme Glisse if (range->valid) 214a3e0d41cSJérôme Glisse continue; 215a3e0d41cSJérôme Glisse range->valid = true; 216a3e0d41cSJérôme Glisse } 217a3e0d41cSJérôme Glisse wake_up_all(&hmm->wq); 218a3e0d41cSJérôme Glisse } 219a3e0d41cSJérôme Glisse mutex_unlock(&hmm->lock); 220a3e0d41cSJérôme Glisse 221704f3f2cSJérôme Glisse hmm_put(hmm); 222c0b12405SJérôme Glisse } 223c0b12405SJérôme Glisse 224c0b12405SJérôme Glisse static const struct mmu_notifier_ops hmm_mmu_notifier_ops = { 225e1401513SRalph Campbell .release = hmm_release, 226c0b12405SJérôme Glisse .invalidate_range_start = hmm_invalidate_range_start, 227c0b12405SJérôme Glisse .invalidate_range_end = hmm_invalidate_range_end, 228c0b12405SJérôme Glisse }; 229c0b12405SJérôme Glisse 230c0b12405SJérôme Glisse /* 231c0b12405SJérôme Glisse * hmm_mirror_register() - register a mirror against an mm 232c0b12405SJérôme Glisse * 233c0b12405SJérôme Glisse * @mirror: new mirror struct to register 234c0b12405SJérôme Glisse * @mm: mm to register against 235085ea250SRalph Campbell * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments 236c0b12405SJérôme Glisse * 237c0b12405SJérôme Glisse * To start mirroring a process address space, the device driver must register 238c0b12405SJérôme Glisse * an HMM mirror struct. 239c0b12405SJérôme Glisse */ 240c0b12405SJérôme Glisse int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm) 241c0b12405SJérôme Glisse { 2428a1a0cd0SJason Gunthorpe lockdep_assert_held_exclusive(&mm->mmap_sem); 2438a1a0cd0SJason Gunthorpe 244c0b12405SJérôme Glisse /* Sanity check */ 245c0b12405SJérôme Glisse if (!mm || !mirror || !mirror->ops) 246c0b12405SJérôme Glisse return -EINVAL; 247c0b12405SJérôme Glisse 248704f3f2cSJérôme Glisse mirror->hmm = hmm_get_or_create(mm); 249c0b12405SJérôme Glisse if (!mirror->hmm) 250c0b12405SJérôme Glisse return -ENOMEM; 251c0b12405SJérôme Glisse 252c0b12405SJérôme Glisse down_write(&mirror->hmm->mirrors_sem); 253c0b12405SJérôme Glisse list_add(&mirror->list, &mirror->hmm->mirrors); 254c0b12405SJérôme Glisse up_write(&mirror->hmm->mirrors_sem); 255c0b12405SJérôme Glisse 256c0b12405SJérôme Glisse return 0; 257c0b12405SJérôme Glisse } 258c0b12405SJérôme Glisse EXPORT_SYMBOL(hmm_mirror_register); 259c0b12405SJérôme Glisse 260c0b12405SJérôme Glisse /* 261c0b12405SJérôme Glisse * hmm_mirror_unregister() - unregister a mirror 262c0b12405SJérôme Glisse * 263085ea250SRalph Campbell * @mirror: mirror struct to unregister 264c0b12405SJérôme Glisse * 265c0b12405SJérôme Glisse * Stop mirroring a process address space, and cleanup. 
/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = mirror->hmm;

	down_write(&hmm->mirrors_sem);
	list_del(&mirror->list);
	up_write(&hmm->mirrors_sem);
	hmm_put(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	bool			fault;
	bool			block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EAGAIN;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EBUSY;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}
/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Return: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i, page_size;

	hmm_vma_walk->last = addr;
	page_size = hmm_range_page_size(range);
	i = (addr - range->start) >> range->page_shift;

	for (; addr < end; addr += page_size, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (!hmm_vma_walk->fault)
		return;

	/*
	 * We not only consider the individual per page request, we also
	 * consider the default flags requested for the range. The API can
	 * be used in two fashions. The first one is where the HMM user
	 * coalesces multiple page faults into one request and sets flags
	 * per pfn for those faults. The second one is where the HMM user
	 * wants to pre-fault a range with specific flags. For the latter
	 * one it is a waste to have the user pre-fill the pfn arrays with
	 * a default flags value.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
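/*
 * An illustrative sketch of the two fashions described above (based on
 * Documentation/vm/hmm.rst; not part of this file). To pre-fault a whole
 * range for write without pre-filling the pfns[] array:
 *
 *	range->default_flags = range->flags[HMM_PFN_VALID] |
 *			       range->flags[HMM_PFN_WRITE];
 *	range->pfn_flags_mask = 0;
 *
 * To instead honor the per-pfn request flags the driver filled in pfns[]:
 *
 *	range->default_flags = 0;
 *	range->pfn_flags_mask = -1ULL;
 */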
static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
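/*
 * The *_to_hmm_pfn_flags() helpers return flags in the driver's own encoding:
 * the driver supplies the flags[] and values[] arrays when setting up the
 * range. An illustrative sketch with hypothetical MY_PFN_* bit definitions
 * (not part of this file):
 *
 *	static const uint64_t my_flags[HMM_PFN_FLAG_MAX] = {
 *		[HMM_PFN_VALID] = MY_PFN_V,
 *		[HMM_PFN_WRITE] = MY_PFN_W,
 *		[HMM_PFN_DEVICE_PRIVATE] = MY_PFN_DEV,
 *	};
 *	static const uint64_t my_values[HMM_PFN_VALUE_MAX] = {
 *		[HMM_PFN_ERROR] = MY_PFN_E,
 *		[HMM_PFN_NONE] = 0,
 *		[HMM_PFN_SPECIAL] = MY_PFN_S,
 *	};
 *
 *	range->flags = my_flags;
 *	range->values = my_values;
 */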
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
#else
	/* If THP is not enabled then we should never reach this code! */
	return -EINVAL;
#endif
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry, ignore migration, use
		 * device and report anything else as error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_device_entry_from_pfn(range,
							 swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	} else {
		cpu_flags = pte_to_hmm_pfn_flags(range, pte);
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
				   &fault, &write_fault);
	}

	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap))
			return -EBUSY;
	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
		*pfn = range->values[HMM_PFN_SPECIAL];
		return -EFAULT;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(vma->vm_mm, pmdp);
			return -EBUSY;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if another thread is
		 * splitting the huge pmd we will get that event through the
		 * mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage get_dev_pagemap() optimization which
		 * will not re-take a reference on a pgmap if we already have
		 * one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}
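/*
 * A sketch of the get_dev_pagemap() caching that the comment above relies on
 * (not part of this file): when the pgmap passed in already covers the pfn it
 * is returned as-is without taking another reference, so walking many device
 * pfns costs one reference get/put pair instead of one per pfn.
 *
 *	pgmap = get_dev_pagemap(pfn0, NULL);	// takes a reference
 *	pgmap = get_dev_pagemap(pfn1, pgmap);	// reused if pfn1 is covered
 *	put_dev_pagemap(pgmap);
 */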
static int hmm_vma_walk_pud(pud_t *pudp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start, next;
	pmd_t *pmdp;
	pud_t pud;
	int ret;

again:
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return hmm_vma_walk_hole(start, end, walk);

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud))
			return hmm_vma_walk_hole(start, end, walk);

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault)
			return hmm_vma_walk_hole_(addr, end, fault,
						  write_fault, walk);

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		return 0;
	}

	split_huge_pud(walk->vma, pudp, addr);
	if (pud_none(*pudp))
		goto again;

	pmdp = pmd_offset(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
		if (ret)
			return ret;
	} while (pmdp++, addr = next, addr != end);

	return 0;
}

static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long addr = start, i, pfn, mask, size, pfn_inc;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	size = 1UL << huge_page_shift(h);
	mask = size - 1;
	if (range->page_shift != PAGE_SHIFT) {
		/* Make sure we are looking at a full page. */
		if (start & mask)
			return -EINVAL;
		if (end < (start + size))
			return -EINVAL;
		pfn_inc = size >> PAGE_SHIFT;
	} else {
		pfn_inc = 1;
		size = PAGE_SIZE;
	}

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> range->page_shift;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
	for (; addr < end; addr += size, i++, pfn += pfn_inc)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
#else /* CONFIG_HUGETLB_PAGE */
	return -EINVAL;
#endif
}

static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}
/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 * @range: range
 * @mirror: the mirror that will track updates to this range
 * @start: start virtual address (inclusive)
 * @end: end virtual address (exclusive)
 * @page_shift: expected page shift for the range
 * Returns 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table; see include/linux/hmm.h
 */
int hmm_range_register(struct hmm_range *range,
		       struct hmm_mirror *mirror,
		       unsigned long start,
		       unsigned long end,
		       unsigned page_shift)
{
	unsigned long mask = ((1UL << page_shift) - 1UL);
	struct hmm *hmm = mirror->hmm;

	range->valid = false;
	range->hmm = NULL;

	if ((start & mask) || (end & mask))
		return -EINVAL;
	if (start >= end)
		return -EINVAL;

	range->page_shift = page_shift;
	range->start = start;
	range->end = end;

	/* Prevent hmm_release() from running while the range is valid */
	if (!mmget_not_zero(hmm->mm))
		return -EFAULT;

	/* Initialize range to track CPU page table updates. */
	mutex_lock(&hmm->lock);

	range->hmm = hmm;
	kref_get(&hmm->kref);
	list_add(&range->list, &hmm->ranges);

	/*
	 * If there are any concurrent notifiers we have to wait for them for
	 * the range to be valid (see hmm_range_wait_until_valid()).
	 */
	if (!hmm->notifiers)
		range->valid = true;
	mutex_unlock(&hmm->lock);

	return 0;
}
EXPORT_SYMBOL(hmm_range_register);

/*
 * hmm_range_unregister() - stop tracking change to CPU page table over a range
 * @range: range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
	struct hmm *hmm = range->hmm;

	mutex_lock(&hmm->lock);
	list_del_init(&range->list);
	mutex_unlock(&hmm->lock);

	/* Drop reference taken by hmm_range_register() */
	mmput(hmm->mm);
	hmm_put(hmm);

	/*
	 * The range is now invalid and the ref on the hmm is dropped, so
	 * poison the pointer. Leave other fields in place, for the caller's
	 * use.
	 */
	range->valid = false;
	memset(&range->hmm, POISON_INUSE, sizeof(range->hmm));
}
EXPORT_SYMBOL(hmm_range_unregister);
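/*
 * A typical range lifetime, condensed from the example in
 * Documentation/vm/hmm.rst (an illustrative sketch; "driver_lock" is a
 * hypothetical lock the driver also takes in its
 * sync_cpu_device_pagetables() callback):
 *
 *	hmm_range_register(&range, mirror, start, end, PAGE_SHIFT);
 *
 * again:
 *	hmm_range_wait_until_valid(&range, HMM_RANGE_DEFAULT_TIMEOUT);
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_snapshot(&range);
 *	if (ret) {
 *		up_read(&mm->mmap_sem);
 *		if (ret == -EAGAIN)
 *			goto again;
 *		hmm_range_unregister(&range);
 *		return ret;
 *	}
 *	take_lock(driver_lock);
 *	if (!hmm_range_valid(&range)) {
 *		release_lock(driver_lock);
 *		up_read(&mm->mmap_sem);
 *		goto again;
 *	}
 *	// ... use range->pfns[] to update the device page tables ...
 *	release_lock(driver_lock);
 *	up_read(&mm->mmap_sem);
 *	hmm_range_unregister(&range);
 */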
/*
 * hmm_range_snapshot() - snapshot CPU page table for a range
 * @range: range
 * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          permission (for instance asking for write and range is read only),
 *          -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
 *          vma or it is illegal to access that range), number of valid pages
 *          in range->pfns[] (from range start address).
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by range struct. See include/linux/hmm.h for an example
 * of how to use it.
 */
long hmm_range_snapshot(struct hmm_range *range)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;

	lockdep_assert_held(&hmm->mm->mmap_sem);
	do {
		/* If range is no longer valid force retry. */
		if (!range->valid)
			return -EAGAIN;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
			    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM does
			 * not support architectures that allow write without
			 * read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = false;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		walk_page_range(start, end, &mm_walk);
		start = end;
	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_snapshot);
/*
 * hmm_range_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *         mmap_sem)
 * Return: number of valid pages in range->pfns[] (from range start
 *         address). This may be zero. If the return value is negative,
 *         then one of the following values may be returned:
 *
 *           -EINVAL: invalid arguments or mm or virtual address are in an
 *                    invalid vma (for instance device file vma).
 *           -ENOMEM: Out of memory.
 *           -EPERM:  Invalid permission (for instance asking for write and
 *                    range is read only).
 *           -EAGAIN: If you need to retry and mmap_sem was dropped. This can
 *                    only happen if the block argument is false.
 *           -EBUSY:  If the range is being invalidated and you should wait
 *                    for invalidation to finish.
 *           -EFAULT: Invalid (ie either no valid vma or it is illegal to
 *                    access that range), number of valid pages in
 *                    range->pfns[] (from range start address).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, bool block)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;
	int ret;

	lockdep_assert_held(&hmm->mm->mmap_sem);

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid) {
			up_read(&hmm->mm->mmap_sem);
			return -EAGAIN;
		}

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
			    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM does
			 * not support architectures that allow write without
			 * read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = true;
		hmm_vma_walk.block = block;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		do {
			ret = walk_page_range(start, end, &mm_walk);
			start = hmm_vma_walk.last;

			/* Keep trying while the range is valid. */
		} while (ret == -EBUSY && range->valid);

		if (ret) {
			unsigned long i;

			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
			hmm_pfns_clear(range, &range->pfns[i],
				       hmm_vma_walk.last, range->end);
			return ret;
		}
		start = end;

	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
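/*
 * An illustrative usage sketch (condensed from Documentation/vm/hmm.rst, with
 * error handling abbreviated): faulting pages for a registered range follows
 * the same pattern as hmm_range_snapshot() above, with the caveat that on
 * -EAGAIN hmm_range_fault() has already dropped mmap_sem:
 *
 * again:
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(&range, true);
 *	if (ret < 0) {
 *		if (ret == -EAGAIN)	// mmap_sem already dropped
 *			goto again;
 *		up_read(&mm->mmap_sem);
 *		hmm_range_unregister(&range);
 *		return ret;
 *	}
 *	// ... take driver lock, check hmm_range_valid(), program device ...
 *	up_read(&mm->mmap_sem);
 */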
/**
 * hmm_range_dma_map() - hmm_range_fault() and dma map page all in one.
 * @range: range being faulted
 * @device: device against which to dma map pages
 * @daddrs: dma address of mapped pages
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *         mmap_sem)
 * Return: number of pages mapped on success, -EAGAIN if mmap_sem has been
 *         dropped and you need to try again, some other error value otherwise
 *
 * Note same usage pattern as hmm_range_fault().
 */
112955c0ece8SJérôme Glisse /**
113055c0ece8SJérôme Glisse  * hmm_range_dma_map() - hmm_range_fault() and dma map pages all in one.
113155c0ece8SJérôme Glisse  * @range: range being faulted
113255c0ece8SJérôme Glisse  * @device: device against which to dma map pages
113355c0ece8SJérôme Glisse  * @daddrs: dma address of mapped pages
113455c0ece8SJérôme Glisse  * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
1135085ea250SRalph Campbell * Return: number of pages mapped on success, -EAGAIN if mmap_sem has been
113655c0ece8SJérôme Glisse  * dropped and you need to try again, some other error value otherwise
113755c0ece8SJérôme Glisse  *
113855c0ece8SJérôme Glisse  * Note: same usage pattern as hmm_range_fault().
113955c0ece8SJérôme Glisse  */
114055c0ece8SJérôme Glisse long hmm_range_dma_map(struct hmm_range *range,
114155c0ece8SJérôme Glisse 		       struct device *device,
114255c0ece8SJérôme Glisse 		       dma_addr_t *daddrs,
114355c0ece8SJérôme Glisse 		       bool block)
114455c0ece8SJérôme Glisse {
114555c0ece8SJérôme Glisse 	unsigned long i, npages, mapped;
114655c0ece8SJérôme Glisse 	long ret;
114755c0ece8SJérôme Glisse 
114855c0ece8SJérôme Glisse 	ret = hmm_range_fault(range, block);
114955c0ece8SJérôme Glisse 	if (ret <= 0)
115055c0ece8SJérôme Glisse 		return ret ? ret : -EBUSY;
115155c0ece8SJérôme Glisse 
115255c0ece8SJérôme Glisse 	npages = (range->end - range->start) >> PAGE_SHIFT;
115355c0ece8SJérôme Glisse 	for (i = 0, mapped = 0; i < npages; ++i) {
115455c0ece8SJérôme Glisse 		enum dma_data_direction dir = DMA_TO_DEVICE;
115555c0ece8SJérôme Glisse 		struct page *page;
115655c0ece8SJérôme Glisse 
115755c0ece8SJérôme Glisse 		/*
115855c0ece8SJérôme Glisse 		 * FIXME need to update DMA API to provide invalid DMA address
115955c0ece8SJérôme Glisse 		 * value instead of a function to test dma address value. This
116055c0ece8SJérôme Glisse 		 * would remove a lot of dumb code duplicated across many arches.
116155c0ece8SJérôme Glisse 		 *
116255c0ece8SJérôme Glisse 		 * For now setting it to 0 here is good enough as the pfns[]
116355c0ece8SJérôme Glisse 		 * value is what is used to check what is valid and what isn't.
116455c0ece8SJérôme Glisse 		 */
116555c0ece8SJérôme Glisse 		daddrs[i] = 0;
116655c0ece8SJérôme Glisse 
1167391aab11SJérôme Glisse 		page = hmm_device_entry_to_page(range, range->pfns[i]);
116855c0ece8SJérôme Glisse 		if (page == NULL)
116955c0ece8SJérôme Glisse 			continue;
117055c0ece8SJérôme Glisse 
117155c0ece8SJérôme Glisse 		/* Check if range is being invalidated */
117255c0ece8SJérôme Glisse 		if (!range->valid) {
117355c0ece8SJérôme Glisse 			ret = -EBUSY;
117455c0ece8SJérôme Glisse 			goto unmap;
117555c0ece8SJérôme Glisse 		}
117655c0ece8SJérôme Glisse 
117755c0ece8SJérôme Glisse 		/* If it is read and write then map bi-directional. */
117855c0ece8SJérôme Glisse 		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
117955c0ece8SJérôme Glisse 			dir = DMA_BIDIRECTIONAL;
118055c0ece8SJérôme Glisse 
118155c0ece8SJérôme Glisse 		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
118255c0ece8SJérôme Glisse 		if (dma_mapping_error(device, daddrs[i])) {
118355c0ece8SJérôme Glisse 			ret = -EFAULT;
118455c0ece8SJérôme Glisse 			goto unmap;
118555c0ece8SJérôme Glisse 		}
118655c0ece8SJérôme Glisse 
118755c0ece8SJérôme Glisse 		mapped++;
118855c0ece8SJérôme Glisse 	}
118955c0ece8SJérôme Glisse 
119055c0ece8SJérôme Glisse 	return mapped;
119155c0ece8SJérôme Glisse 
119255c0ece8SJérôme Glisse unmap:
119355c0ece8SJérôme Glisse 	for (npages = i, i = 0; (i < npages) && mapped; ++i) {
119455c0ece8SJérôme Glisse 		enum dma_data_direction dir = DMA_TO_DEVICE;
119555c0ece8SJérôme Glisse 		struct page *page;
119655c0ece8SJérôme Glisse 
1197391aab11SJérôme Glisse 		page = hmm_device_entry_to_page(range, range->pfns[i]);
119855c0ece8SJérôme Glisse 		if (page == NULL)
119955c0ece8SJérôme Glisse 			continue;
120055c0ece8SJérôme Glisse 
120155c0ece8SJérôme Glisse 		if (dma_mapping_error(device, daddrs[i]))
120255c0ece8SJérôme Glisse 			continue;
120355c0ece8SJérôme Glisse 
120455c0ece8SJérôme Glisse 		/* If it was mapped read and write, unmap bi-directional. */
120555c0ece8SJérôme Glisse 		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
120655c0ece8SJérôme Glisse 			dir = DMA_BIDIRECTIONAL;
120755c0ece8SJérôme Glisse 
120855c0ece8SJérôme Glisse 		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
120955c0ece8SJérôme Glisse 		mapped--;
121055c0ece8SJérôme Glisse 	}
121155c0ece8SJérôme Glisse 
121255c0ece8SJérôme Glisse 	return ret;
121355c0ece8SJérôme Glisse }
121455c0ece8SJérôme Glisse EXPORT_SYMBOL(hmm_range_dma_map);
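/*
 * Example (illustrative sketch, not part of the original file): the caller
 * supplies one dma_addr_t slot per page in the range; on success the return
 * value is the number of pages that were faulted in and dma mapped, while
 * -EAGAIN again means mmap_sem was dropped and the call must be retried.
 * "dev" and the initialized "range" are assumed to already exist.
 *
 *	unsigned long npages = (range.end - range.start) >> PAGE_SHIFT;
 *	dma_addr_t *daddrs;
 *	long mapped;
 *
 *	daddrs = kcalloc(npages, sizeof(*daddrs), GFP_KERNEL);
 *	if (!daddrs)
 *		return -ENOMEM;
 *	mapped = hmm_range_dma_map(&range, dev, daddrs, true);
 *	if (mapped < 0)
 *		goto out_free;
 */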
121555c0ece8SJérôme Glisse 
121655c0ece8SJérôme Glisse /**
121755c0ece8SJérôme Glisse  * hmm_range_dma_unmap() - unmap a range that was mapped with hmm_range_dma_map()
121855c0ece8SJérôme Glisse  * @range: range being unmapped
121955c0ece8SJérôme Glisse  * @vma: the vma against which the range was mapped (optional)
122055c0ece8SJérôme Glisse  * @device: device against which dma map was done
122155c0ece8SJérôme Glisse  * @daddrs: dma address of mapped pages
122255c0ece8SJérôme Glisse  * @dirty: dirty page if it had the write flag set
1223085ea250SRalph Campbell * Return: number of pages unmapped on success, -EINVAL otherwise
122455c0ece8SJérôme Glisse  *
122555c0ece8SJérôme Glisse  * Note that the caller MUST abide by the mmu notifier or use an HMM mirror
122655c0ece8SJérôme Glisse  * and abide by the sync_cpu_device_pagetables() callback so that it is safe
122755c0ece8SJérôme Glisse  * here to call set_page_dirty(). The caller must also take the locks needed
122855c0ece8SJérôme Glisse  * to prevent concurrent mmu notifier or sync_cpu_device_pagetables() calls.
122955c0ece8SJérôme Glisse  */
123055c0ece8SJérôme Glisse long hmm_range_dma_unmap(struct hmm_range *range,
123155c0ece8SJérôme Glisse 			 struct vm_area_struct *vma,
123255c0ece8SJérôme Glisse 			 struct device *device,
123355c0ece8SJérôme Glisse 			 dma_addr_t *daddrs,
123455c0ece8SJérôme Glisse 			 bool dirty)
123555c0ece8SJérôme Glisse {
123655c0ece8SJérôme Glisse 	unsigned long i, npages;
123755c0ece8SJérôme Glisse 	long cpages = 0;
123855c0ece8SJérôme Glisse 
123955c0ece8SJérôme Glisse 	/* Sanity check. */
124055c0ece8SJérôme Glisse 	if (range->end <= range->start)
124155c0ece8SJérôme Glisse 		return -EINVAL;
124255c0ece8SJérôme Glisse 	if (!daddrs)
124355c0ece8SJérôme Glisse 		return -EINVAL;
124455c0ece8SJérôme Glisse 	if (!range->pfns)
124555c0ece8SJérôme Glisse 		return -EINVAL;
124655c0ece8SJérôme Glisse 
124755c0ece8SJérôme Glisse 	npages = (range->end - range->start) >> PAGE_SHIFT;
124855c0ece8SJérôme Glisse 	for (i = 0; i < npages; ++i) {
124955c0ece8SJérôme Glisse 		enum dma_data_direction dir = DMA_TO_DEVICE;
125055c0ece8SJérôme Glisse 		struct page *page;
125155c0ece8SJérôme Glisse 
1252391aab11SJérôme Glisse 		page = hmm_device_entry_to_page(range, range->pfns[i]);
125355c0ece8SJérôme Glisse 		if (page == NULL)
125455c0ece8SJérôme Glisse 			continue;
125555c0ece8SJérôme Glisse 
125655c0ece8SJérôme Glisse 		/* If it was mapped read and write, unmap bi-directional. */
125755c0ece8SJérôme Glisse 		if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
125855c0ece8SJérôme Glisse 			dir = DMA_BIDIRECTIONAL;
125955c0ece8SJérôme Glisse 
126055c0ece8SJérôme Glisse 			/*
126155c0ece8SJérôme Glisse 			 * See comments in function description on why it is
126255c0ece8SJérôme Glisse 			 * safe here to call set_page_dirty()
126355c0ece8SJérôme Glisse 			 */
126455c0ece8SJérôme Glisse 			if (dirty)
126555c0ece8SJérôme Glisse 				set_page_dirty(page);
126655c0ece8SJérôme Glisse 		}
126755c0ece8SJérôme Glisse 
126855c0ece8SJérôme Glisse 		/* Unmap and clear pfns/dma address */
126955c0ece8SJérôme Glisse 		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
127055c0ece8SJérôme Glisse 		range->pfns[i] = range->values[HMM_PFN_NONE];
127155c0ece8SJérôme Glisse 		/* FIXME see comments in hmm_range_dma_map() */
127255c0ece8SJérôme Glisse 		daddrs[i] = 0;
127355c0ece8SJérôme Glisse 		cpages++;
127455c0ece8SJérôme Glisse 	}
127555c0ece8SJérôme Glisse 
127655c0ece8SJérôme Glisse 	return cpages;
127755c0ece8SJérôme Glisse }
127855c0ece8SJérôme Glisse EXPORT_SYMBOL(hmm_range_dma_unmap);
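/*
 * Example (illustrative sketch, not part of the original file): unmapping
 * must use the same daddrs array that hmm_range_dma_map() filled in, and
 * dirty should be true whenever the device may have written through a
 * mapping that had HMM_PFN_WRITE set.
 *
 *	long cpages;
 *
 *	cpages = hmm_range_dma_unmap(&range, vma, dev, daddrs, true);
 *	if (cpages < 0)
 *		return cpages;
 *	kfree(daddrs);
 */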
1279c0b12405SJérôme Glisse #endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
12804ef589dcSJérôme Glisse 
12814ef589dcSJérôme Glisse 
1282df6ad698SJérôme Glisse #if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
12834ef589dcSJérôme Glisse struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
12844ef589dcSJérôme Glisse 				       unsigned long addr)
12854ef589dcSJérôme Glisse {
12864ef589dcSJérôme Glisse 	struct page *page;
12874ef589dcSJérôme Glisse 
12884ef589dcSJérôme Glisse 	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
12894ef589dcSJérôme Glisse 	if (!page)
12904ef589dcSJérôme Glisse 		return NULL;
12914ef589dcSJérôme Glisse 	lock_page(page);
12924ef589dcSJérôme Glisse 	return page;
12934ef589dcSJérôme Glisse }
12944ef589dcSJérôme Glisse EXPORT_SYMBOL(hmm_vma_alloc_locked_page);
12954ef589dcSJérôme Glisse 
12964ef589dcSJérôme Glisse 
12974ef589dcSJérôme Glisse static void hmm_devmem_ref_release(struct percpu_ref *ref)
12984ef589dcSJérôme Glisse {
12994ef589dcSJérôme Glisse 	struct hmm_devmem *devmem;
13004ef589dcSJérôme Glisse 
13014ef589dcSJérôme Glisse 	devmem = container_of(ref, struct hmm_devmem, ref);
13024ef589dcSJérôme Glisse 	complete(&devmem->completion);
13034ef589dcSJérôme Glisse }
13044ef589dcSJérôme Glisse 
13054ef589dcSJérôme Glisse static void hmm_devmem_ref_exit(void *data)
13064ef589dcSJérôme Glisse {
13074ef589dcSJérôme Glisse 	struct percpu_ref *ref = data;
13084ef589dcSJérôme Glisse 	struct hmm_devmem *devmem;
13094ef589dcSJérôme Glisse 
13104ef589dcSJérôme Glisse 	devmem = container_of(ref, struct hmm_devmem, ref);
1311bbecd94eSDan Williams 	wait_for_completion(&devmem->completion);
13124ef589dcSJérôme Glisse 	percpu_ref_exit(ref);
13134ef589dcSJérôme Glisse }
13144ef589dcSJérôme Glisse 
1315bbecd94eSDan Williams static void hmm_devmem_ref_kill(struct percpu_ref *ref)
13164ef589dcSJérôme Glisse {
13174ef589dcSJérôme Glisse 	percpu_ref_kill(ref);
13184ef589dcSJérôme Glisse }
13194ef589dcSJérôme Glisse 
1320b57e622eSSouptick Joarder static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
13214ef589dcSJérôme Glisse 				   unsigned long addr,
13224ef589dcSJérôme Glisse 				   const struct page *page,
13234ef589dcSJérôme Glisse 				   unsigned int flags,
13244ef589dcSJérôme Glisse 				   pmd_t *pmdp)
13254ef589dcSJérôme Glisse {
13264ef589dcSJérôme Glisse 	struct hmm_devmem *devmem = page->pgmap->data;
13274ef589dcSJérôme Glisse 
13284ef589dcSJérôme Glisse 	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
13294ef589dcSJérôme Glisse }
13304ef589dcSJérôme Glisse 
13314ef589dcSJérôme Glisse static void hmm_devmem_free(struct page *page, void *data)
13324ef589dcSJérôme Glisse {
13334ef589dcSJérôme Glisse 	struct hmm_devmem *devmem = data;
13344ef589dcSJérôme Glisse 
13352fa147bdSDan Williams 	page->mapping = NULL;
13362fa147bdSDan Williams 
13374ef589dcSJérôme Glisse 	devmem->ops->free(devmem, page);
13384ef589dcSJérôme Glisse }
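/*
 * Example (illustrative sketch, not part of the original file): the two
 * trampolines above only forward to the driver's callbacks, so a minimal
 * hmm_devmem_ops can look like this; my_devmem_fault(), my_devmem_free(),
 * my_migrate_to_ram() and my_reclaim_device_memory() are hypothetical
 * driver helpers.
 *
 *	static vm_fault_t my_devmem_fault(struct hmm_devmem *devmem,
 *					  struct vm_area_struct *vma,
 *					  unsigned long addr,
 *					  const struct page *page,
 *					  unsigned int flags, pmd_t *pmdp)
 *	{
 *		if (my_migrate_to_ram(devmem, vma, addr, page))
 *			return VM_FAULT_SIGBUS;
 *		return 0;
 *	}
 *
 *	static void my_devmem_free(struct hmm_devmem *devmem,
 *				   struct page *page)
 *	{
 *		my_reclaim_device_memory(devmem, page);
 *	}
 *
 *	static const struct hmm_devmem_ops my_devmem_ops = {
 *		.fault = my_devmem_fault,
 *		.free = my_devmem_free,
 *	};
 */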
13394ef589dcSJérôme Glisse 
13404ef589dcSJérôme Glisse /*
13414ef589dcSJérôme Glisse  * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
13424ef589dcSJérôme Glisse  *
13434ef589dcSJérôme Glisse  * @ops: memory event device driver callback (see struct hmm_devmem_ops)
13444ef589dcSJérôme Glisse  * @device: device struct to bind the resource to
13454ef589dcSJérôme Glisse  * @size: size in bytes of the device memory to add
1346085ea250SRalph Campbell * Return: pointer to new hmm_devmem struct, ERR_PTR otherwise
13474ef589dcSJérôme Glisse  *
13484ef589dcSJérôme Glisse  * This function first finds an empty range of physical address big enough to
13494ef589dcSJérôme Glisse  * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
13504ef589dcSJérôme Glisse  * in turn allocates struct pages. It does not do anything beyond that; all
13514ef589dcSJérôme Glisse  * events affecting the memory will go through the various callbacks provided
13524ef589dcSJérôme Glisse  * by the hmm_devmem_ops struct.
13534ef589dcSJérôme Glisse  *
13544ef589dcSJérôme Glisse  * The device driver should call this function during device initialization
13554ef589dcSJérôme Glisse  * and is then responsible for the memory management. HMM only provides helpers.
13564ef589dcSJérôme Glisse  */
13574ef589dcSJérôme Glisse struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
13584ef589dcSJérôme Glisse 				  struct device *device,
13594ef589dcSJérôme Glisse 				  unsigned long size)
13604ef589dcSJérôme Glisse {
13614ef589dcSJérôme Glisse 	struct hmm_devmem *devmem;
13624ef589dcSJérôme Glisse 	resource_size_t addr;
1363bbecd94eSDan Williams 	void *result;
13644ef589dcSJérôme Glisse 	int ret;
13654ef589dcSJérôme Glisse 
1366e7638488SDan Williams 	dev_pagemap_get_ops();
13674ef589dcSJérôme Glisse 
136858ef15b7SDan Williams 	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
13694ef589dcSJérôme Glisse 	if (!devmem)
13704ef589dcSJérôme Glisse 		return ERR_PTR(-ENOMEM);
13714ef589dcSJérôme Glisse 
13724ef589dcSJérôme Glisse 	init_completion(&devmem->completion);
13734ef589dcSJérôme Glisse 	devmem->pfn_first = -1UL;
13744ef589dcSJérôme Glisse 	devmem->pfn_last = -1UL;
13754ef589dcSJérôme Glisse 	devmem->resource = NULL;
13764ef589dcSJérôme Glisse 	devmem->device = device;
13774ef589dcSJérôme Glisse 	devmem->ops = ops;
13784ef589dcSJérôme Glisse 
13794ef589dcSJérôme Glisse 	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
13804ef589dcSJérôme Glisse 			      0, GFP_KERNEL);
13814ef589dcSJérôme Glisse 	if (ret)
138258ef15b7SDan Williams 		return ERR_PTR(ret);
13834ef589dcSJérôme Glisse 
138458ef15b7SDan Williams 	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
13854ef589dcSJérôme Glisse 	if (ret)
138658ef15b7SDan Williams 		return ERR_PTR(ret);
13874ef589dcSJérôme Glisse 
13884ef589dcSJérôme Glisse 	size = ALIGN(size, PA_SECTION_SIZE);
13894ef589dcSJérôme Glisse 	addr = min((unsigned long)iomem_resource.end,
13904ef589dcSJérôme Glisse 		   (1UL << MAX_PHYSMEM_BITS) - 1);
13914ef589dcSJérôme Glisse 	addr = addr - size + 1UL;
13924ef589dcSJérôme Glisse 
13934ef589dcSJérôme Glisse 	/*
13944ef589dcSJérôme Glisse 	 * FIXME add a new helper to quickly walk resource tree and find free
13954ef589dcSJérôme Glisse 	 * range
13964ef589dcSJérôme Glisse 	 *
13974ef589dcSJérôme Glisse 	 * FIXME what about the ioport_resource resource?
13984ef589dcSJérôme Glisse 	 */
13994ef589dcSJérôme Glisse 	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
14004ef589dcSJérôme Glisse 		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
14014ef589dcSJérôme Glisse 		if (ret != REGION_DISJOINT)
14024ef589dcSJérôme Glisse 			continue;
14034ef589dcSJérôme Glisse 
14044ef589dcSJérôme Glisse 		devmem->resource = devm_request_mem_region(device, addr, size,
14054ef589dcSJérôme Glisse 							   dev_name(device));
140658ef15b7SDan Williams 		if (!devmem->resource)
140758ef15b7SDan Williams 			return ERR_PTR(-ENOMEM);
14084ef589dcSJérôme Glisse 		break;
14094ef589dcSJérôme Glisse 	}
141058ef15b7SDan Williams 	if (!devmem->resource)
141158ef15b7SDan Williams 		return ERR_PTR(-ERANGE);
14124ef589dcSJérôme Glisse 
14134ef589dcSJérôme Glisse 	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
14144ef589dcSJérôme Glisse 	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
14154ef589dcSJérôme Glisse 	devmem->pfn_last = devmem->pfn_first +
14164ef589dcSJérôme Glisse 			   (resource_size(devmem->resource) >> PAGE_SHIFT);
1417063a7d1dSDan Williams 	devmem->page_fault = hmm_devmem_fault;
14184ef589dcSJérôme Glisse 
1419bbecd94eSDan Williams 	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
1420bbecd94eSDan Williams 	devmem->pagemap.res = *devmem->resource;
1421bbecd94eSDan Williams 	devmem->pagemap.page_free = hmm_devmem_free;
1422bbecd94eSDan Williams 	devmem->pagemap.altmap_valid = false;
1423bbecd94eSDan Williams 	devmem->pagemap.ref = &devmem->ref;
1424bbecd94eSDan Williams 	devmem->pagemap.data = devmem;
1425bbecd94eSDan Williams 	devmem->pagemap.kill = hmm_devmem_ref_kill;
142658ef15b7SDan Williams 
1427bbecd94eSDan Williams 	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
1428bbecd94eSDan Williams 	if (IS_ERR(result))
1429bbecd94eSDan Williams 		return result;
14304ef589dcSJérôme Glisse 	return devmem;
14314ef589dcSJérôme Glisse }
143202917e9fSDan Williams EXPORT_SYMBOL_GPL(hmm_devmem_add);
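/*
 * Example (illustrative sketch, not part of the original file): a driver
 * typically hotplugs its memory once at probe time; MY_DEVMEM_SIZE is a
 * hypothetical driver constant (the size is section aligned above). On
 * success, struct pages back the pfns in [devmem->pfn_first,
 * devmem->pfn_last).
 *
 *	devmem = hmm_devmem_add(&my_devmem_ops, dev, MY_DEVMEM_SIZE);
 *	if (IS_ERR(devmem))
 *		return PTR_ERR(devmem);
 */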
14334ef589dcSJérôme Glisse 
1434d3df0a42SJérôme Glisse struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
1435d3df0a42SJérôme Glisse 					   struct device *device,
1436d3df0a42SJérôme Glisse 					   struct resource *res)
1437d3df0a42SJérôme Glisse {
1438d3df0a42SJérôme Glisse 	struct hmm_devmem *devmem;
1439bbecd94eSDan Williams 	void *result;
1440d3df0a42SJérôme Glisse 	int ret;
1441d3df0a42SJérôme Glisse 
1442d3df0a42SJérôme Glisse 	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
1443d3df0a42SJérôme Glisse 		return ERR_PTR(-EINVAL);
1444d3df0a42SJérôme Glisse 
1445e7638488SDan Williams 	dev_pagemap_get_ops();
1446d3df0a42SJérôme Glisse 
144758ef15b7SDan Williams 	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
1448d3df0a42SJérôme Glisse 	if (!devmem)
1449d3df0a42SJérôme Glisse 		return ERR_PTR(-ENOMEM);
1450d3df0a42SJérôme Glisse 
1451d3df0a42SJérôme Glisse 	init_completion(&devmem->completion);
1452d3df0a42SJérôme Glisse 	devmem->pfn_first = -1UL;
1453d3df0a42SJérôme Glisse 	devmem->pfn_last = -1UL;
1454d3df0a42SJérôme Glisse 	devmem->resource = res;
1455d3df0a42SJérôme Glisse 	devmem->device = device;
1456d3df0a42SJérôme Glisse 	devmem->ops = ops;
1457d3df0a42SJérôme Glisse 
1458d3df0a42SJérôme Glisse 	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
1459d3df0a42SJérôme Glisse 			      0, GFP_KERNEL);
1460d3df0a42SJérôme Glisse 	if (ret)
146158ef15b7SDan Williams 		return ERR_PTR(ret);
1462d3df0a42SJérôme Glisse 
146358ef15b7SDan Williams 	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
146458ef15b7SDan Williams 				       &devmem->ref);
1465d3df0a42SJérôme Glisse 	if (ret)
146658ef15b7SDan Williams 		return ERR_PTR(ret);
1467d3df0a42SJérôme Glisse 
1468d3df0a42SJérôme Glisse 	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
1469d3df0a42SJérôme Glisse 	devmem->pfn_last = devmem->pfn_first +
1470d3df0a42SJérôme Glisse 			   (resource_size(devmem->resource) >> PAGE_SHIFT);
1471063a7d1dSDan Williams 	devmem->page_fault = hmm_devmem_fault;
1472d3df0a42SJérôme Glisse 
1473bbecd94eSDan Williams 	devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
1474bbecd94eSDan Williams 	devmem->pagemap.res = *devmem->resource;
1475bbecd94eSDan Williams 	devmem->pagemap.page_free = hmm_devmem_free;
1476bbecd94eSDan Williams 	devmem->pagemap.altmap_valid = false;
1477bbecd94eSDan Williams 	devmem->pagemap.ref = &devmem->ref;
1478bbecd94eSDan Williams 	devmem->pagemap.data = devmem;
1479bbecd94eSDan Williams 	devmem->pagemap.kill = hmm_devmem_ref_kill;
148058ef15b7SDan Williams 
1481bbecd94eSDan Williams 	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
1482bbecd94eSDan Williams 	if (IS_ERR(result))
1483bbecd94eSDan Williams 		return result;
1484d3df0a42SJérôme Glisse 	return devmem;
1485d3df0a42SJérôme Glisse }
148602917e9fSDan Williams EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);
1487d3df0a42SJérôme Glisse 
14884ef589dcSJérôme Glisse /*
1489858b54daSJérôme Glisse  * A device driver that wants to handle multiple devices' memory through a
1490858b54daSJérôme Glisse  * single fake device can use hmm_device to do so. This is purely a helper
1491858b54daSJérôme Glisse  * and it is not needed to make use of any HMM functionality.
1492858b54daSJérôme Glisse  */
1493858b54daSJérôme Glisse #define HMM_DEVICE_MAX 256
1494858b54daSJérôme Glisse 
1495858b54daSJérôme Glisse static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
1496858b54daSJérôme Glisse static DEFINE_SPINLOCK(hmm_device_lock);
1497858b54daSJérôme Glisse static struct class *hmm_device_class;
1498858b54daSJérôme Glisse static dev_t hmm_device_devt;
1499858b54daSJérôme Glisse 
1500858b54daSJérôme Glisse static void hmm_device_release(struct device *device)
1501858b54daSJérôme Glisse {
1502858b54daSJérôme Glisse 	struct hmm_device *hmm_device;
1503858b54daSJérôme Glisse 
1504858b54daSJérôme Glisse 	hmm_device = container_of(device, struct hmm_device, device);
1505858b54daSJérôme Glisse 	spin_lock(&hmm_device_lock);
1506858b54daSJérôme Glisse 	clear_bit(hmm_device->minor, hmm_device_mask);
1507858b54daSJérôme Glisse 	spin_unlock(&hmm_device_lock);
1508858b54daSJérôme Glisse 
1509858b54daSJérôme Glisse 	kfree(hmm_device);
1510858b54daSJérôme Glisse }
1511858b54daSJérôme Glisse 
1512858b54daSJérôme Glisse struct hmm_device *hmm_device_new(void *drvdata)
1513858b54daSJérôme Glisse {
1514858b54daSJérôme Glisse 	struct hmm_device *hmm_device;
1515858b54daSJérôme Glisse 
1516858b54daSJérôme Glisse 	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
1517858b54daSJérôme Glisse 	if (!hmm_device)
1518858b54daSJérôme Glisse 		return ERR_PTR(-ENOMEM);
1519858b54daSJérôme Glisse 
1520858b54daSJérôme Glisse 	spin_lock(&hmm_device_lock);
1521858b54daSJérôme Glisse 	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
1522858b54daSJérôme Glisse 	if (hmm_device->minor >= HMM_DEVICE_MAX) {
1523858b54daSJérôme Glisse 		spin_unlock(&hmm_device_lock);
1524858b54daSJérôme Glisse 		kfree(hmm_device);
1525858b54daSJérôme Glisse 		return ERR_PTR(-EBUSY);
1526858b54daSJérôme Glisse 	}
1527858b54daSJérôme Glisse 	set_bit(hmm_device->minor, hmm_device_mask);
1528858b54daSJérôme Glisse 	spin_unlock(&hmm_device_lock);
1529858b54daSJérôme Glisse 
1530858b54daSJérôme Glisse 	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
1531858b54daSJérôme Glisse 	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
1532858b54daSJérôme Glisse 					hmm_device->minor);
1533858b54daSJérôme Glisse 	hmm_device->device.release = hmm_device_release;
1534858b54daSJérôme Glisse 	dev_set_drvdata(&hmm_device->device, drvdata);
1535858b54daSJérôme Glisse 	hmm_device->device.class = hmm_device_class;
1536858b54daSJérôme Glisse 	device_initialize(&hmm_device->device);
1537858b54daSJérôme Glisse 
1538858b54daSJérôme Glisse 	return hmm_device;
1539858b54daSJérôme Glisse }
1540858b54daSJérôme Glisse EXPORT_SYMBOL(hmm_device_new);
1541858b54daSJérôme Glisse 
1542858b54daSJérôme Glisse void hmm_device_put(struct hmm_device *hmm_device)
1543858b54daSJérôme Glisse {
1544858b54daSJérôme Glisse 	put_device(&hmm_device->device);
1545858b54daSJérôme Glisse }
1546858b54daSJérôme Glisse EXPORT_SYMBOL(hmm_device_put);
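/*
 * Example (illustrative sketch, not part of the original file): a driver
 * that wants one fake device in front of several physical devices creates
 * it once and releases it with hmm_device_put(); my_drvdata is whatever
 * private pointer the driver wants back from dev_get_drvdata().
 *
 *	hmm_device = hmm_device_new(my_drvdata);
 *	if (IS_ERR(hmm_device))
 *		return PTR_ERR(hmm_device);
 *	...
 *	hmm_device_put(hmm_device);
 */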
1547858b54daSJérôme Glisse 
1548858b54daSJérôme Glisse static int __init hmm_init(void)
1549858b54daSJérôme Glisse {
1550858b54daSJérôme Glisse 	int ret;
1551858b54daSJérôme Glisse 
1552858b54daSJérôme Glisse 	ret = alloc_chrdev_region(&hmm_device_devt, 0,
1553858b54daSJérôme Glisse 				  HMM_DEVICE_MAX,
1554858b54daSJérôme Glisse 				  "hmm_device");
1555858b54daSJérôme Glisse 	if (ret)
1556858b54daSJérôme Glisse 		return ret;
1557858b54daSJérôme Glisse 
1558858b54daSJérôme Glisse 	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
1559858b54daSJérôme Glisse 	if (IS_ERR(hmm_device_class)) {
1560858b54daSJérôme Glisse 		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
1561858b54daSJérôme Glisse 		return PTR_ERR(hmm_device_class);
1562858b54daSJérôme Glisse 	}
1563858b54daSJérôme Glisse 	return 0;
1564858b54daSJérôme Glisse }
1565858b54daSJérôme Glisse 
1566858b54daSJérôme Glisse device_initcall(hmm_init);
1567df6ad698SJérôme Glisse #endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */