// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/**
 * hmm_get_or_create - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 * Returns: an HMM object, either by referencing the existing
 *          (per-process) object, or by creating a new one.
 *
 * This is not intended to be used directly by device drivers. If mm already
 * has an HMM struct then it gets a reference on it and returns it. Otherwise
 * it allocates an HMM struct, initializes it, associates it with the mm and
 * returns it.
 */
static struct hmm *hmm_get_or_create(struct mm_struct *mm)
{
	struct hmm *hmm;

	lockdep_assert_held_exclusive(&mm->mmap_sem);

	/* Abuse the page_table_lock to also protect mm->hmm. */
	spin_lock(&mm->page_table_lock);
	hmm = mm->hmm;
	if (mm->hmm && kref_get_unless_zero(&mm->hmm->kref))
		goto out_unlock;
	spin_unlock(&mm->page_table_lock);

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	init_waitqueue_head(&hmm->wq);
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	mutex_init(&hmm->lock);
	kref_init(&hmm->kref);
	hmm->notifiers = 0;
	hmm->mm = mm;

	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
		kfree(hmm);
		return NULL;
	}

	mmgrab(hmm->mm);

	/*
	 * We hold the exclusive mmap_sem here so we know that mm->hmm is
	 * still NULL or 0 kref, and is safe to update.
	 */
	spin_lock(&mm->page_table_lock);
	mm->hmm = hmm;

out_unlock:
	spin_unlock(&mm->page_table_lock);
	return hmm;
}

static void hmm_free_rcu(struct rcu_head *rcu)
{
	struct hmm *hmm = container_of(rcu, struct hmm, rcu);

	mmdrop(hmm->mm);
	kfree(hmm);
}

static void hmm_free(struct kref *kref)
{
	struct hmm *hmm = container_of(kref, struct hmm, kref);

	spin_lock(&hmm->mm->page_table_lock);
	if (hmm->mm->hmm == hmm)
		hmm->mm->hmm = NULL;
	spin_unlock(&hmm->mm->page_table_lock);

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, hmm->mm);
	mmu_notifier_call_srcu(&hmm->rcu, hmm_free_rcu);
}

static inline void hmm_put(struct hmm *hmm)
{
	kref_put(&hmm->kref, hmm_free);
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;

	/* Bail out if hmm is in the process of being freed */
	if (!kref_get_unless_zero(&hmm->kref))
		return;

	/*
	 * Since hmm_range_register() holds the mmget() lock hmm_release() is
	 * prevented as long as a range exists.
	 */
	WARN_ON(!list_empty_careful(&hmm->ranges));

	down_write(&hmm->mirrors_sem);
	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
					  list);
	while (mirror) {
		list_del_init(&mirror->list);
		if (mirror->ops->release) {
			/*
			 * Drop mirrors_sem so the release callback can wait
			 * on any pending work that might itself trigger a
			 * mmu_notifier callback and thus would deadlock with
			 * us.
			 */
			up_write(&hmm->mirrors_sem);
			mirror->ops->release(mirror);
			down_write(&hmm->mirrors_sem);
		}
		mirror = list_first_entry_or_null(&hmm->mirrors,
						  struct hmm_mirror, list);
	}
	up_write(&hmm->mirrors_sem);

	hmm_put(hmm);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;
	struct hmm_update update;
	struct hmm_range *range;
	int ret = 0;

	if (!kref_get_unless_zero(&hmm->kref))
		return 0;

	update.start = nrange->start;
	update.end = nrange->end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = mmu_notifier_range_blockable(nrange);

	if (mmu_notifier_range_blockable(nrange))
		mutex_lock(&hmm->lock);
	else if (!mutex_trylock(&hmm->lock)) {
		ret = -EAGAIN;
		goto out;
	}
	hmm->notifiers++;
	list_for_each_entry(range, &hmm->ranges, list) {
		if (update.end < range->start || update.start >= range->end)
			continue;

		range->valid = false;
	}
	mutex_unlock(&hmm->lock);

	if (mmu_notifier_range_blockable(nrange))
		down_read(&hmm->mirrors_sem);
	else if (!down_read_trylock(&hmm->mirrors_sem)) {
		ret = -EAGAIN;
		goto out;
	}
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int ret;

		ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
		if (!update.blockable && ret == -EAGAIN)
			break;
	}
	up_read(&hmm->mirrors_sem);

out:
	hmm_put(hmm);
	return ret;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);

	if (!kref_get_unless_zero(&hmm->kref))
		return;

	mutex_lock(&hmm->lock);
	hmm->notifiers--;
	if (!hmm->notifiers) {
		struct hmm_range *range;

		list_for_each_entry(range, &hmm->ranges, list) {
			if (range->valid)
				continue;
			range->valid = true;
		}
		wake_up_all(&hmm->wq);
	}
	mutex_unlock(&hmm->lock);

	hmm_put(hmm);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct. (See the example sketch after this function.)
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	lockdep_assert_held_exclusive(&mm->mmap_sem);

	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mirror->hmm = hmm_get_or_create(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
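
/*
 * Example (an illustrative sketch, not part of this file; the mydev_* names
 * are hypothetical and the hmm_mirror_ops definition comes from
 * include/linux/hmm.h): a driver supplies the two callbacks invoked above by
 * hmm_invalidate_range_start() and hmm_release(), then registers the mirror
 * with the exclusive mmap_sem held, as the lockdep assertion in
 * hmm_mirror_register() requires:
 *
 *	static int mydev_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *					const struct hmm_update *update)
 *	{
 *		// Invalidate the device page table for
 *		// [update->start, update->end) here.
 *		return 0;
 *	}
 *
 *	static void mydev_release(struct hmm_mirror *mirror)
 *	{
 *		// The mm is going away; stop all device access to it.
 *	}
 *
 *	static const struct hmm_mirror_ops mydev_mirror_ops = {
 *		.sync_cpu_device_pagetables = mydev_sync_cpu_device_pagetables,
 *		.release = mydev_release,
 *	};
 *
 *	mirror->ops = &mydev_mirror_ops;
 *	down_write(&mm->mmap_sem);
 *	ret = hmm_mirror_register(mirror, mm);
 *	up_write(&mm->mmap_sem);
 */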

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and clean up.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = mirror->hmm;

	down_write(&hmm->mirrors_sem);
	list_del_init(&mirror->list);
	up_write(&hmm->mirrors_sem);
	hmm_put(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	bool			fault;
	bool			block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EAGAIN;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EBUSY;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}

/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Return: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i, page_size;

	hmm_vma_walk->last = addr;
	page_size = hmm_range_page_size(range);
	i = (addr - range->start) >> range->page_shift;

	for (; addr < end; addr += page_size, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (!hmm_vma_walk->fault)
		return;

	/*
	 * We not only consider the individual per page request, we also
	 * consider the default flags requested for the range. The API can
	 * be used in two fashions. In the first one, the HMM user coalesces
	 * multiple page faults into one request and sets flags per pfn for
	 * each of those faults. In the second one, the HMM user wants to
	 * pre-fault a range with specific flags. For the latter it is a
	 * waste to have the user pre-fill the pfn array with a default
	 * flags value. (See the example sketch after this function.)
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
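
/*
 * Example of the second fashion described in hmm_pte_need_fault() (a sketch;
 * the flag values depend on what the driver stored in range->flags[]): to
 * pre-fault an entire range for read and write without pre-filling
 * range->pfns[], rely on the default flags alone and mask out any per-pfn
 * request:
 *
 *	range->default_flags = range->flags[HMM_PFN_VALID] |
 *			       range->flags[HMM_PFN_WRITE];
 *	range->pfn_flags_mask = 0;
 *
 * With pfn_flags_mask set to 0, the per-pfn values in range->pfns[] are
 * ignored and every page is treated as a valid + write fault request.
 */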

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
						hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
#else
	/* If THP is not enabled then we should never reach this code! */
	return -EINVAL;
#endif
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry, ignore migration, use
		 * device and report anything else as error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				    range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				     range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_device_entry_from_pfn(range,
							 swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	} else {
		cpu_flags = pte_to_hmm_pfn_flags(range, pte);
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
				   &fault, &write_fault);
	}

	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap))
			return -EBUSY;
	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
		*pfn = range->values[HMM_PFN_SPECIAL];
		return -EFAULT;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(vma->vm_mm, pmdp);
			return -EBUSY;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd_lock here; even if some other
		 * thread is splitting the huge pmd we will get that event
		 * through the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point it is either a
	 * valid pmd entry pointing to a pte directory or a bad pmd that will
	 * not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage the get_dev_pagemap() optimization
		 * which will not re-take a reference on a pgmap if we already
		 * have one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

static int hmm_vma_walk_pud(pud_t *pudp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start, next;
	pmd_t *pmdp;
	pud_t pud;
	int ret;

again:
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return hmm_vma_walk_hole(start, end, walk);

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud))
			return hmm_vma_walk_hole(start, end, walk);

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault)
			return hmm_vma_walk_hole_(addr, end, fault,
						  write_fault, walk);

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
						hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		return 0;
	}

	split_huge_pud(walk->vma, pudp, addr);
	if (pud_none(*pudp))
		goto again;

	pmdp = pmd_offset(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
		if (ret)
			return ret;
	} while (pmdp++, addr = next, addr != end);

	return 0;
}

static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long addr = start, i, pfn, mask, size, pfn_inc;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	size = 1UL << huge_page_shift(h);
	mask = size - 1;
	if (range->page_shift != PAGE_SHIFT) {
		/* Make sure we are looking at a full page. */
		if (start & mask)
			return -EINVAL;
		if (end < (start + size))
			return -EINVAL;
		pfn_inc = size >> PAGE_SHIFT;
	} else {
		pfn_inc = 1;
		size = PAGE_SIZE;
	}

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> range->page_shift;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
	for (; addr < end; addr += size, i++, pfn += pfn_inc)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
#else /* CONFIG_HUGETLB_PAGE */
	return -EINVAL;
#endif
}

static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 * @range: range
 * @mirror: the mirror struct for the range of virtual addresses
 * @start: start virtual address (inclusive)
 * @end: end virtual address (exclusive)
 * @page_shift: expected page shift for the range
 * Returns 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table; see include/linux/hmm.h and the usage
 * sketch after this function.
 */
int hmm_range_register(struct hmm_range *range,
		       struct hmm_mirror *mirror,
		       unsigned long start,
		       unsigned long end,
		       unsigned page_shift)
{
	unsigned long mask = ((1UL << page_shift) - 1UL);
	struct hmm *hmm = mirror->hmm;

	range->valid = false;
	range->hmm = NULL;

	if ((start & mask) || (end & mask))
		return -EINVAL;
	if (start >= end)
		return -EINVAL;

	range->page_shift = page_shift;
	range->start = start;
	range->end = end;

	/* Prevent hmm_release() from running while the range is valid */
	if (!mmget_not_zero(hmm->mm))
		return -EFAULT;

	/* Initialize range to track CPU page table updates. */
	mutex_lock(&hmm->lock);

	range->hmm = hmm;
	kref_get(&hmm->kref);
	list_add(&range->list, &hmm->ranges);

	/*
	 * If there are any concurrent notifiers we have to wait for them for
	 * the range to be valid (see hmm_range_wait_until_valid()).
	 */
	if (!hmm->notifiers)
		range->valid = true;
	mutex_unlock(&hmm->lock);

	return 0;
}
EXPORT_SYMBOL(hmm_range_register);
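
/*
 * Example usage (a sketch; assumes a mirror already registered with
 * hmm_mirror_register() and a driver-chosen timeout, with units as defined
 * for hmm_range_wait_until_valid() in include/linux/hmm.h):
 *
 *	ret = hmm_range_register(&range, mirror, start, end, PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *	if (!hmm_range_wait_until_valid(&range, timeout)) {
 *		// A concurrent invalidation is still running; retry or bail.
 *		hmm_range_unregister(&range);
 *		return -EBUSY;
 *	}
 */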

/*
 * hmm_range_unregister() - stop tracking change to CPU page table over a range
 * @range: range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
	struct hmm *hmm = range->hmm;

	/* Sanity check: this really should not happen. */
	if (hmm == NULL || range->end <= range->start)
		return;

	mutex_lock(&hmm->lock);
	list_del_init(&range->list);
	mutex_unlock(&hmm->lock);

	/* Drop reference taken by hmm_range_register() */
	range->valid = false;
	mmput(hmm->mm);
	hmm_put(hmm);
	range->hmm = NULL;
}
EXPORT_SYMBOL(hmm_range_unregister);

/*
 * hmm_range_snapshot() - snapshot CPU page table for a range
 * @range: range
 * Return: number of valid pages in range->pfns[] (from range start address),
 * or one of the following error codes: -EINVAL if invalid argument, -ENOMEM
 * if out of memory, -EPERM if invalid permission (for instance asking for
 * write and range is read only), -EAGAIN if you need to retry, -EFAULT if
 * invalid (i.e. either no valid vma or it is illegal to access that range).
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by the range struct. See include/linux/hmm.h for an
 * example of how to use it.
 */
long hmm_range_snapshot(struct hmm_range *range)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;

	lockdep_assert_held(&hmm->mm->mmap_sem);
	do {
		/* If range is no longer valid force retry. */
		if (!range->valid)
			return -EAGAIN;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
				    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM
			 * does not support architectures that allow write
			 * without read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = false;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		walk_page_range(start, end, &mm_walk);
		start = end;
	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_snapshot);
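
/*
 * Example (a sketch of the retry pattern; error handling trimmed):
 *
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_snapshot(&range);
 *	up_read(&mm->mmap_sem);
 *	if (ret == -EAGAIN) {
 *		// The range was invalidated: wait for it to become valid
 *		// again (hmm_range_wait_until_valid()) and take the
 *		// snapshot once more.
 *	}
 */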

/*
 * hmm_range_fault() - try to fault some addresses in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *         mmap_sem)
 * Return: number of valid pages in range->pfns[] (from range start
 *         address). This may be zero. If the return value is negative,
 *         then one of the following values may be returned:
 *
 *           -EINVAL: Invalid arguments or mm, or virtual address is in an
 *                    invalid vma (for instance a device file vma).
 *           -ENOMEM: Out of memory.
 *           -EPERM:  Invalid permission (for instance asking for write and
 *                    range is read only).
 *           -EAGAIN: If you need to retry and mmap_sem was dropped. This can
 *                    only happen if the block argument is false.
 *           -EBUSY:  If the range is being invalidated and you should wait
 *                    for invalidation to finish.
 *           -EFAULT: Invalid (i.e. either no valid vma or it is illegal to
 *                    access that range).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, bool block)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;
	int ret;

	lockdep_assert_held(&hmm->mm->mmap_sem);

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid) {
			up_read(&hmm->mm->mmap_sem);
			return -EAGAIN;
		}

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
			    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM
			 * does not support architectures that allow write
			 * without read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = true;
		hmm_vma_walk.block = block;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		do {
			ret = walk_page_range(start, end, &mm_walk);
			start = hmm_vma_walk.last;

			/* Keep trying while the range is valid. */
		} while (ret == -EBUSY && range->valid);

		if (ret) {
			unsigned long i;

			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
			hmm_pfns_clear(range, &range->pfns[i],
				       hmm_vma_walk.last, range->end);
			return ret;
		}
		start = end;

	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
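
/*
 * Example (a sketch; note that on -EAGAIN hmm_range_fault() has already
 * dropped mmap_sem, so the caller must not unlock it again):
 *
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(&range, true);
 *	if (ret < 0) {
 *		if (ret != -EAGAIN)
 *			up_read(&mm->mmap_sem);
 *		return ret;
 *	}
 *	// range->pfns[] now holds ret valid entries from range->start.
 *	up_read(&mm->mmap_sem);
 */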

/**
 * hmm_range_dma_map() - hmm_range_fault() and dma map pages all in one.
 * @range: range being faulted
 * @device: device against which to dma map pages
 * @daddrs: dma addresses of mapped pages
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *         mmap_sem)
 * Return: number of pages mapped on success, -EAGAIN if mmap_sem has been
 * dropped and you need to try again, some other error value otherwise
 *
 * Note: same usage pattern as hmm_range_fault().
 */
long hmm_range_dma_map(struct hmm_range *range,
		       struct device *device,
		       dma_addr_t *daddrs,
		       bool block)
{
	unsigned long i, npages, mapped;
	long ret;

	ret = hmm_range_fault(range, block);
	if (ret <= 0)
		return ret ? ret : -EBUSY;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0, mapped = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		/*
		 * FIXME need to update the DMA API to provide an invalid DMA
		 * address value instead of a function to test the dma address
		 * value. This would remove a lot of dumb code duplicated
		 * across many archs.
		 *
		 * For now setting it to 0 here is good enough as the pfns[]
		 * value is what is used to check what is valid and what isn't.
		 */
		daddrs[i] = 0;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* Check if range is being invalidated */
		if (!range->valid) {
			ret = -EBUSY;
			goto unmap;
		}

		/* If it is read and write then map bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(device, daddrs[i])) {
			ret = -EFAULT;
			goto unmap;
		}

		mapped++;
	}

	return mapped;

unmap:
	for (npages = i, i = 0; (i < npages) && mapped; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		if (dma_mapping_error(device, daddrs[i]))
			continue;

		/* If it was mapped read and write, it was bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		mapped--;
	}

	return ret;
}
EXPORT_SYMBOL(hmm_range_dma_map);
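
/*
 * A minimal usage sketch (hypothetical driver code; "dev" is the device
 * doing the DMA and "daddrs" must have room for one dma_addr_t per page in
 * the range, error handling and retry elided):
 *
 *	npages = (range.end - range.start) >> PAGE_SHIFT;
 *	daddrs = kcalloc(npages, sizeof(*daddrs), GFP_KERNEL);
 *
 *	down_read(&mm->mmap_sem);
 *	mapped = hmm_range_dma_map(&range, dev, daddrs, true);
 *	if (mapped < 0) {
 *		if (mapped != -EAGAIN)	// -EAGAIN already dropped mmap_sem
 *			up_read(&mm->mmap_sem);
 *		return mapped;
 *	}
 *	up_read(&mm->mmap_sem);
 *	// Program the device with daddrs[]; entries for holes are left 0.
 */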

/**
 * hmm_range_dma_unmap() - unmap a range that was mapped with
 * hmm_range_dma_map()
 * @range: range being unmapped
 * @vma: the vma against which the range was mapped (optional)
 * @device: device against which the dma map was done
 * @daddrs: dma addresses of mapped pages
 * @dirty: dirty the pages if they had the write flag set
 * Return: number of pages unmapped on success, -EINVAL otherwise
 *
 * Note that the caller MUST abide by the mmu notifier, or use an HMM mirror
 * and abide by the sync_cpu_device_pagetables() callback, so that it is safe
 * here to call set_page_dirty(). The caller must also take appropriate locks
 * to prevent concurrent mmu notifier or sync_cpu_device_pagetables() calls
 * from making progress.
 */
long hmm_range_dma_unmap(struct hmm_range *range,
			 struct vm_area_struct *vma,
			 struct device *device,
			 dma_addr_t *daddrs,
			 bool dirty)
{
	unsigned long i, npages;
	long cpages = 0;

	/* Sanity check. */
	if (range->end <= range->start)
		return -EINVAL;
	if (!daddrs)
		return -EINVAL;
	if (!range->pfns)
		return -EINVAL;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* If it was mapped read and write, it was bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
			dir = DMA_BIDIRECTIONAL;

			/*
			 * See comments in the function description on why it
			 * is safe here to call set_page_dirty().
			 */
			if (dirty)
				set_page_dirty(page);
		}

		/* Unmap and clear pfns/dma addresses. */
		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		range->pfns[i] = range->values[HMM_PFN_NONE];
		/* FIXME see comments in hmm_range_dma_map() */
		daddrs[i] = 0;
		cpages++;
	}

	return cpages;
}
EXPORT_SYMBOL(hmm_range_dma_unmap);
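
/*
 * Continuing the sketch above (hypothetical driver code): once the device
 * is done with the range, unmap it while holding the driver lock that
 * serializes against sync_cpu_device_pagetables(), dirtying the pages the
 * device may have written through a write mapping:
 *
 *	cpages = hmm_range_dma_unmap(&range, NULL, dev, daddrs, true);
 *	if (cpages < 0)
 *		return cpages;
 *	kfree(daddrs);
 */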
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);
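
/*
 * A minimal sketch of how a driver's fault callback might use the helper
 * above when migrating a device page back to system memory (hypothetical
 * code; the copy step depends on the device):
 *
 *	struct page *spage;
 *
 *	spage = hmm_vma_alloc_locked_page(vma, addr);
 *	if (!spage)
 *		return VM_FAULT_OOM;
 *	// ... DMA the content of the device page into spage, then hand the
 *	// locked page over to the migration machinery ...
 */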

static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	complete(&devmem->completion);
}

static void hmm_devmem_ref_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	wait_for_completion(&devmem->completion);
	percpu_ref_exit(ref);
}

static void hmm_devmem_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill(ref);
}

static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
				   unsigned long addr,
				   const struct page *page,
				   unsigned int flags,
				   pmd_t *pmdp)
{
	struct hmm_devmem *devmem = page->pgmap->data;

	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}

static void hmm_devmem_free(struct page *page, void *data)
{
	struct hmm_devmem *devmem = data;

	page->mapping = NULL;

	devmem->ops->free(devmem, page);
}

/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Return: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical addresses big enough
 * to contain the new resource, and then hotplugs it as ZONE_DEVICE memory,
 * which in turn allocates struct pages. It does not do anything beyond that;
 * all events affecting the memory will go through the various callbacks
 * provided by the hmm_devmem_ops struct.
 *
 * The device driver should call this function during device initialization
 * and is then responsible for the memory management. HMM only provides
 * helpers.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size)
{
	struct hmm_devmem *devmem;
	resource_size_t addr;
	void *result;
	int ret;

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = NULL;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
				       &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	size = ALIGN(size, PA_SECTION_SIZE);
	addr = min((unsigned long)iomem_resource.end,
		   (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = addr - size + 1UL;

	/*
	 * FIXME add a new helper to quickly walk the resource tree and find
	 * a free range.
	 *
	 * FIXME what about the ioport_resource resource?
	 */
	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
		if (ret != REGION_DISJOINT)
			continue;

		devmem->resource = devm_request_mem_region(device, addr, size,
							   dev_name(device));
		if (!devmem->resource)
			return ERR_PTR(-ENOMEM);
		break;
	}
	if (!devmem->resource)
		return ERR_PTR(-ERANGE);

	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add);
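
/*
 * A minimal registration sketch (hypothetical driver code; my_devmem_fault
 * and my_devmem_free are driver callbacks, and the 1GB size is arbitrary):
 *
 *	static const struct hmm_devmem_ops my_devmem_ops = {
 *		.free	= my_devmem_free,
 *		.fault	= my_devmem_fault,
 *	};
 *
 *	devmem = hmm_devmem_add(&my_devmem_ops, &pdev->dev, SZ_1G);
 *	if (IS_ERR(devmem))
 *		return PTR_ERR(devmem);
 *	// Device pages span devmem->pfn_first up to devmem->pfn_last.
 */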
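
/*
 * A minimal sketch for the resource-based variant below (hypothetical
 * driver code; "res" would typically be a firmware-described, cache-coherent
 * device memory range already marked IORES_DESC_DEVICE_PUBLIC_MEMORY):
 *
 *	devmem = hmm_devmem_add_resource(&my_devmem_ops, &pdev->dev, res);
 *	if (IS_ERR(devmem))
 *		return PTR_ERR(devmem);
 */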

struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res)
{
	struct hmm_devmem *devmem;
	void *result;
	int ret;

	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
		return ERR_PTR(-EINVAL);

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = res;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
				       &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);

/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper
 * and it is not needed to make use of any HMM functionality.
 */
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;

static void hmm_device_release(struct device *device)
{
	struct hmm_device *hmm_device;

	hmm_device = container_of(device, struct hmm_device, device);
	spin_lock(&hmm_device_lock);
	clear_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	kfree(hmm_device);
}

struct hmm_device *hmm_device_new(void *drvdata)
{
	struct hmm_device *hmm_device;

	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
	if (!hmm_device)
		return ERR_PTR(-ENOMEM);

	spin_lock(&hmm_device_lock);
	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
	if (hmm_device->minor >= HMM_DEVICE_MAX) {
		spin_unlock(&hmm_device_lock);
		kfree(hmm_device);
		return ERR_PTR(-EBUSY);
	}
	set_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
					hmm_device->minor);
	hmm_device->device.release = hmm_device_release;
	dev_set_drvdata(&hmm_device->device, drvdata);
	hmm_device->device.class = hmm_device_class;
	device_initialize(&hmm_device->device);

	return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);
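
/*
 * A minimal sketch (hypothetical driver code): create one fake device to
 * back several physical devices' memory, then drop the reference on
 * teardown:
 *
 *	struct hmm_device *hdev;
 *
 *	hdev = hmm_device_new(my_driver_data);
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *	// use &hdev->device as the device for hmm_devmem_add() ...
 *	hmm_device_put(hdev);
 */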

void hmm_device_put(struct hmm_device *hmm_device)
{
	put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);

static int __init hmm_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&hmm_device_devt, 0,
				  HMM_DEVICE_MAX,
				  "hmm_device");
	if (ret)
		return ret;

	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
	if (IS_ERR(hmm_device_class)) {
		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
		return PTR_ERR(hmm_device_class);
	}
	return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */