/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

static inline struct hmm *mm_get_hmm(struct mm_struct *mm)
{
	struct hmm *hmm = READ_ONCE(mm->hmm);

	if (hmm && kref_get_unless_zero(&hmm->kref))
		return hmm;

	return NULL;
}

/**
 * hmm_get_or_create - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 * Returns: returns an HMM object, either by referencing the existing
 *          (per-process) object, or by creating a new one.
 *
 * This is not intended to be used directly by device drivers. If mm already
 * has an HMM struct then it gets a reference on it and returns it. Otherwise
 * it allocates an HMM struct, initializes it, associates it with the mm and
 * returns it.
 */
static struct hmm *hmm_get_or_create(struct mm_struct *mm)
{
	struct hmm *hmm = mm_get_hmm(mm);
	bool cleanup = false;

	if (hmm)
		return hmm;

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	init_waitqueue_head(&hmm->wq);
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	mutex_init(&hmm->lock);
	kref_init(&hmm->kref);
	hmm->notifiers = 0;
	hmm->dead = false;
	hmm->mm = mm;

	spin_lock(&mm->page_table_lock);
	if (!mm->hmm)
		mm->hmm = hmm;
	else
		cleanup = true;
	spin_unlock(&mm->page_table_lock);

	if (cleanup)
		goto error;

	/*
	 * We should only get here if we hold the mmap_sem in write mode,
	 * i.e. on registration of the first mirror through
	 * hmm_mirror_register().
	 */
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
		goto error_mm;

	return hmm;

error_mm:
	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);
error:
	kfree(hmm);
	return NULL;
}

static void hmm_free(struct kref *kref)
{
	struct hmm *hmm = container_of(kref, struct hmm, kref);
	struct mm_struct *mm = hmm->mm;

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);

	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);

	kfree(hmm);
}

static inline void hmm_put(struct hmm *hmm)
{
	kref_put(&hmm->kref, hmm_free);
}

void hmm_mm_destroy(struct mm_struct *mm)
{
	struct hmm *hmm;

	spin_lock(&mm->page_table_lock);
	hmm = mm_get_hmm(mm);
	mm->hmm = NULL;
	if (hmm) {
		hmm->mm = NULL;
		hmm->dead = true;
		spin_unlock(&mm->page_table_lock);
		hmm_put(hmm);
		return;
	}

	spin_unlock(&mm->page_table_lock);
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm *hmm = mm_get_hmm(mm);
	struct hmm_mirror *mirror;
	struct hmm_range *range;

	/* Report this HMM as dying. */
	hmm->dead = true;

	/* Wake-up everyone waiting on any range. */
	mutex_lock(&hmm->lock);
	list_for_each_entry(range, &hmm->ranges, list) {
		range->valid = false;
	}
	wake_up_all(&hmm->wq);
	mutex_unlock(&hmm->lock);

	down_write(&hmm->mirrors_sem);
	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
					  list);
	while (mirror) {
		list_del_init(&mirror->list);
		if (mirror->ops->release) {
			/*
			 * Drop mirrors_sem so the callback can wait on any
			 * pending work that might itself trigger an
			 * mmu_notifier callback and thus would deadlock with
			 * us.
			 */
			up_write(&hmm->mirrors_sem);
			mirror->ops->release(mirror);
			down_write(&hmm->mirrors_sem);
		}
		mirror = list_first_entry_or_null(&hmm->mirrors,
						  struct hmm_mirror, list);
	}
	up_write(&hmm->mirrors_sem);

	hmm_put(hmm);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = mm_get_hmm(nrange->mm);
	struct hmm_mirror *mirror;
	struct hmm_update update;
	struct hmm_range *range;
	int ret = 0;

	VM_BUG_ON(!hmm);

	update.start = nrange->start;
	update.end = nrange->end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = nrange->blockable;

	if (nrange->blockable)
		mutex_lock(&hmm->lock);
	else if (!mutex_trylock(&hmm->lock)) {
		ret = -EAGAIN;
		goto out;
	}
	hmm->notifiers++;
	list_for_each_entry(range, &hmm->ranges, list) {
		if (update.end < range->start || update.start >= range->end)
			continue;

		range->valid = false;
	}
	mutex_unlock(&hmm->lock);

	if (nrange->blockable)
		down_read(&hmm->mirrors_sem);
	else if (!down_read_trylock(&hmm->mirrors_sem)) {
		ret = -EAGAIN;
		goto out;
	}
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int ret;

		ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
		if (!update.blockable && ret == -EAGAIN) {
			up_read(&hmm->mirrors_sem);
			ret = -EAGAIN;
			goto out;
		}
	}
	up_read(&hmm->mirrors_sem);

out:
	hmm_put(hmm);
	return ret;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = mm_get_hmm(nrange->mm);

	VM_BUG_ON(!hmm);

	mutex_lock(&hmm->lock);
	hmm->notifiers--;
	if (!hmm->notifiers) {
		struct hmm_range *range;

		list_for_each_entry(range, &hmm->ranges, list) {
			if (range->valid)
				continue;
			range->valid = true;
		}
		wake_up_all(&hmm->wq);
	}
	mutex_unlock(&hmm->lock);

	hmm_put(hmm);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mirror->hmm = hmm_get_or_create(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
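
/*
 * Example: a minimal sketch of how a driver might wire a mirror up. The
 * structure, callbacks and names below are hypothetical and only illustrate
 * the expected call sequence (note that mmap_sem must be held in write mode
 * around hmm_mirror_register()):
 *
 *	static int drv_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *						  const struct hmm_update *update)
 *	{
 *		// Invalidate device mappings covering
 *		// [update->start, update->end). If update->blockable is
 *		// false and we would have to sleep, return -EAGAIN.
 *		return 0;
 *	}
 *
 *	static void drv_release(struct hmm_mirror *mirror)
 *	{
 *		// The address space is going away; tear down device mappings.
 *	}
 *
 *	static const struct hmm_mirror_ops drv_mirror_ops = {
 *		.sync_cpu_device_pagetables = drv_sync_cpu_device_pagetables,
 *		.release = drv_release,
 *	};
 *
 *	down_write(&mm->mmap_sem);
 *	drv->mirror.ops = &drv_mirror_ops;
 *	ret = hmm_mirror_register(&drv->mirror, mm);
 *	up_write(&mm->mmap_sem);
 */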

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and clean up.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = READ_ONCE(mirror->hmm);

	if (hmm == NULL)
		return;

	down_write(&hmm->mirrors_sem);
	list_del_init(&mirror->list);
	/* To protect us against double unregister ... */
	mirror->hmm = NULL;
	up_write(&hmm->mirrors_sem);

	hmm_put(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
	bool			fault;
	bool			block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EAGAIN;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EBUSY;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}

/*
 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Returns: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i, page_size;

	hmm_vma_walk->last = addr;
	page_size = hmm_range_page_size(range);
	i = (addr - range->start) >> range->page_shift;

	for (; addr < end; addr += page_size, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (!hmm_vma_walk->fault)
		return;

	/*
	 * So we not only consider the individual per-page request, we also
	 * consider the default flags requested for the range. The API can be
	 * used in two fashions. The first one is where the HMM user coalesces
	 * multiple page faults into one request and sets flags per pfn for
	 * each of those faults. The second one is where the HMM user wants to
	 * pre-fault a range with specific flags. For the latter it is a waste
	 * to have the user pre-fill the pfn array with a default flags value.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
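
/*
 * Example: how the default_flags / pfn_flags_mask pair described in
 * hmm_pte_need_fault() is typically used. This is only a sketch; the actual
 * flag bit values are whatever the driver placed in range->flags[]:
 *
 *	// Pre-fault the whole range with at least read permission and
 *	// ignore whatever is already in the pfns array:
 *	range->default_flags = range->flags[HMM_PFN_VALID];
 *	range->pfn_flags_mask = 0;
 *
 *	// Per-page control instead: request read everywhere but only ask
 *	// for write on the entries the driver marked in the pfns array:
 *	range->default_flags = range->flags[HMM_PFN_VALID];
 *	range->pfn_flags_mask = range->flags[HMM_PFN_WRITE];
 *	range->pfns[i] = range->flags[HMM_PFN_WRITE];
 */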

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
	hmm_vma_walk->last = end;
	return 0;
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry, ignore migration, use
		 * device and report anything else as error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				    range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				     range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_pfn_from_pfn(range, swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	} else {
		cpu_flags = pte_to_hmm_pfn_flags(range, pte);
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
				   &fault, &write_fault);
	}

	if (fault || write_fault)
		goto fault;

	*pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;


again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(vma->vm_mm, pmdp);
			return -EBUSY;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd_lock here: even if some other
		 * thread is splitting the huge pmd, we will get that event
		 * through the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point it is either a
	 * valid pmd entry pointing to a pte directory or a bad pmd that will
	 * not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() already unmapped the pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long addr = start, i, pfn, mask, size, pfn_inc;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	size = 1UL << huge_page_shift(h);
	mask = size - 1;
	if (range->page_shift != PAGE_SHIFT) {
		/* Make sure we are looking at a full page. */
		if (start & mask)
			return -EINVAL;
		if (end < (start + size))
			return -EINVAL;
		pfn_inc = size >> PAGE_SHIFT;
	} else {
		pfn_inc = 1;
		size = PAGE_SIZE;
	}


	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> range->page_shift;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
	for (; addr < end; addr += size, i++, pfn += pfn_inc)
		range->pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
#else /* CONFIG_HUGETLB_PAGE */
	return -EINVAL;
#endif
}

static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

static void hmm_pfns_special(struct hmm_range *range)
{
	unsigned long addr = range->start, i = 0;

	for (; addr < range->end; addr += PAGE_SIZE, i++)
		range->pfns[i] = range->values[HMM_PFN_SPECIAL];
}

/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 * @range: range
 * @mm: the mm struct for the range of virtual address
 * @start: start virtual address (inclusive)
 * @end: end virtual address (exclusive)
 * @page_shift: expected page shift for the range
 * Returns 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table, see include/linux/hmm.h
 */
int hmm_range_register(struct hmm_range *range,
		       struct mm_struct *mm,
		       unsigned long start,
		       unsigned long end,
		       unsigned page_shift)
{
	unsigned long mask = ((1UL << page_shift) - 1UL);

	range->valid = false;
	range->hmm = NULL;

	if ((start & mask) || (end & mask))
		return -EINVAL;
	if (start >= end)
		return -EINVAL;

	range->page_shift = page_shift;
	range->start = start;
	range->end = end;

	range->hmm = hmm_get_or_create(mm);
	if (!range->hmm)
		return -EFAULT;

	/* Check if hmm_mm_destroy() was called. */
	if (range->hmm->mm == NULL || range->hmm->dead) {
		hmm_put(range->hmm);
		return -EFAULT;
	}

	/* Initialize range to track CPU page table update */
	mutex_lock(&range->hmm->lock);

	list_add_rcu(&range->list, &range->hmm->ranges);

	/*
	 * If there are any concurrent notifiers we have to wait for them for
	 * the range to be valid (see hmm_range_wait_until_valid()).
	 */
	if (!range->hmm->notifiers)
		range->valid = true;
	mutex_unlock(&range->hmm->lock);

	return 0;
}
EXPORT_SYMBOL(hmm_range_register);
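
/*
 * Example: a rough sketch of the registration step of the snapshot/fault
 * path. The pfns/flags/values arrays are the driver's own storage and the
 * names are hypothetical; hmm_range_wait_until_valid() is the helper from
 * include/linux/hmm.h referenced in the comment above (timeout argument in
 * milliseconds, value below is arbitrary):
 *
 *	range.pfns = drv_pfns;
 *	range.flags = drv_flags;
 *	range.values = drv_values;
 *	range.default_flags = 0;
 *	range.pfn_flags_mask = -1UL;
 *
 *	ret = hmm_range_register(&range, mm, start, end, PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *
 *	if (!hmm_range_wait_until_valid(&range, 1000)) {
 *		// A notifier is still running; retry or give up.
 *	}
 *	...
 *	hmm_range_unregister(&range);
 */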

/*
 * hmm_range_unregister() - stop tracking change to CPU page table over a range
 * @range: range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
	/* Sanity check: this really should not happen. */
	if (range->hmm == NULL || range->end <= range->start)
		return;

	mutex_lock(&range->hmm->lock);
	list_del_rcu(&range->list);
	mutex_unlock(&range->hmm->lock);

	/* Drop reference taken by hmm_range_register() */
	range->valid = false;
	hmm_put(range->hmm);
	range->hmm = NULL;
}
EXPORT_SYMBOL(hmm_range_unregister);

/*
 * hmm_range_snapshot() - snapshot CPU page table for a range
 * @range: range
 * Returns: -EINVAL if invalid argument, -ENOMEM if out of memory, -EPERM for
 *          an invalid permission (for instance asking for write and range is
 *          read only), -EAGAIN if you need to retry, -EFAULT if invalid (ie
 *          either no valid vma or it is illegal to access that range);
 *          otherwise the number of valid pages in range->pfns[] (from range
 *          start address).
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by the range struct. See include/linux/hmm.h for an
 * example of how to use it.
 */
long hmm_range_snapshot(struct hmm_range *range)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;

	/* Check if hmm_mm_destroy() was called. */
	if (hmm->mm == NULL || hmm->dead)
		return -EFAULT;

	do {
		/* If the range is no longer valid, force a retry. */
		if (!range->valid)
			return -EAGAIN;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		/* FIXME support dax */
		if (vma_is_dax(vma)) {
			hmm_pfns_special(range);
			return -EINVAL;
		}

		if (is_vm_hugetlb_page(vma)) {
			struct hstate *h = hstate_vma(vma);

			if (huge_page_shift(h) != range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM
			 * does not support architectures that allow write
			 * without read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = false;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		walk_page_range(start, end, &mm_walk);
		start = end;
	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_snapshot);
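
/*
 * Example: the retry loop a driver is expected to build around
 * hmm_range_snapshot(). This is only a sketch; drv_lock()/drv_unlock() and
 * the device page table update stand in for driver-specific code, and the
 * same pattern applies to hmm_range_fault():
 *
 * again:
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_snapshot(&range);
 *	if (ret < 0) {
 *		up_read(&mm->mmap_sem);
 *		if (ret == -EAGAIN)
 *			goto again;	// possibly after waiting for validity
 *		return ret;
 *	}
 *
 *	drv_lock();
 *	if (!range.valid) {
 *		// A concurrent invalidation raced with us; start over.
 *		drv_unlock();
 *		up_read(&mm->mmap_sem);
 *		goto again;
 *	}
 *	// Populate the device page table from range.pfns[] here.
 *	drv_unlock();
 *	up_read(&mm->mmap_sem);
 */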

/*
 * hmm_range_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
 * Returns: number of valid pages in range->pfns[] (from range start
 *          address). This may be zero. If the return value is negative,
 *          then one of the following values may be returned:
 *
 *          -EINVAL: invalid arguments, or the mm or virtual address is in an
 *                   invalid vma (for instance a device file vma).
 *          -ENOMEM: Out of memory.
 *          -EPERM:  Invalid permission (for instance asking for write and
 *                   range is read only).
 *          -EAGAIN: If you need to retry and mmap_sem was dropped. This can
 *                   only happen if the block argument is false.
 *          -EBUSY:  If the range is being invalidated and you should wait
 *                   for the invalidation to finish.
 *          -EFAULT: Invalid (ie either no valid vma or it is illegal to
 *                   access that range).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, bool block)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;
	int ret;

	/* Check if hmm_mm_destroy() was called. */
	if (hmm->mm == NULL || hmm->dead)
		return -EFAULT;

	do {
		/* If the range is no longer valid, force a retry. */
		if (!range->valid) {
			up_read(&hmm->mm->mmap_sem);
			return -EAGAIN;
		}

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		/* FIXME support dax */
		if (vma_is_dax(vma)) {
			hmm_pfns_special(range);
			return -EINVAL;
		}

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
			    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM
			 * does not support architectures that allow write
			 * without read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = true;
		hmm_vma_walk.block = block;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		do {
			ret = walk_page_range(start, end, &mm_walk);
			start = hmm_vma_walk.last;

			/* Keep trying while the range is valid. */
*/ 106373231612SJérôme Glisse } while (ret == -EBUSY && range->valid); 106474eee180SJérôme Glisse 106574eee180SJérôme Glisse if (ret) { 106674eee180SJérôme Glisse unsigned long i; 106774eee180SJérôme Glisse 106874eee180SJérôme Glisse i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT; 1069a3e0d41cSJérôme Glisse hmm_pfns_clear(range, &range->pfns[i], 1070a3e0d41cSJérôme Glisse hmm_vma_walk.last, range->end); 107173231612SJérôme Glisse return ret; 107274eee180SJérôme Glisse } 1073a3e0d41cSJérôme Glisse start = end; 1074a3e0d41cSJérôme Glisse 1075a3e0d41cSJérôme Glisse } while (start < range->end); 1076704f3f2cSJérôme Glisse 107773231612SJérôme Glisse return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT; 107874eee180SJérôme Glisse } 107973231612SJérôme Glisse EXPORT_SYMBOL(hmm_range_fault); 1080c0b12405SJérôme Glisse #endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */ 10814ef589dcSJérôme Glisse 10824ef589dcSJérôme Glisse 1083df6ad698SJérôme Glisse #if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC) 10844ef589dcSJérôme Glisse struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma, 10854ef589dcSJérôme Glisse unsigned long addr) 10864ef589dcSJérôme Glisse { 10874ef589dcSJérôme Glisse struct page *page; 10884ef589dcSJérôme Glisse 10894ef589dcSJérôme Glisse page = alloc_page_vma(GFP_HIGHUSER, vma, addr); 10904ef589dcSJérôme Glisse if (!page) 10914ef589dcSJérôme Glisse return NULL; 10924ef589dcSJérôme Glisse lock_page(page); 10934ef589dcSJérôme Glisse return page; 10944ef589dcSJérôme Glisse } 10954ef589dcSJérôme Glisse EXPORT_SYMBOL(hmm_vma_alloc_locked_page); 10964ef589dcSJérôme Glisse 10974ef589dcSJérôme Glisse 10984ef589dcSJérôme Glisse static void hmm_devmem_ref_release(struct percpu_ref *ref) 10994ef589dcSJérôme Glisse { 11004ef589dcSJérôme Glisse struct hmm_devmem *devmem; 11014ef589dcSJérôme Glisse 11024ef589dcSJérôme Glisse devmem = container_of(ref, struct hmm_devmem, ref); 11034ef589dcSJérôme Glisse complete(&devmem->completion); 11044ef589dcSJérôme Glisse } 11054ef589dcSJérôme Glisse 11064ef589dcSJérôme Glisse static void hmm_devmem_ref_exit(void *data) 11074ef589dcSJérôme Glisse { 11084ef589dcSJérôme Glisse struct percpu_ref *ref = data; 11094ef589dcSJérôme Glisse struct hmm_devmem *devmem; 11104ef589dcSJérôme Glisse 11114ef589dcSJérôme Glisse devmem = container_of(ref, struct hmm_devmem, ref); 1112bbecd94eSDan Williams wait_for_completion(&devmem->completion); 11134ef589dcSJérôme Glisse percpu_ref_exit(ref); 11144ef589dcSJérôme Glisse } 11154ef589dcSJérôme Glisse 1116bbecd94eSDan Williams static void hmm_devmem_ref_kill(struct percpu_ref *ref) 11174ef589dcSJérôme Glisse { 11184ef589dcSJérôme Glisse percpu_ref_kill(ref); 11194ef589dcSJérôme Glisse } 11204ef589dcSJérôme Glisse 1121b57e622eSSouptick Joarder static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma, 11224ef589dcSJérôme Glisse unsigned long addr, 11234ef589dcSJérôme Glisse const struct page *page, 11244ef589dcSJérôme Glisse unsigned int flags, 11254ef589dcSJérôme Glisse pmd_t *pmdp) 11264ef589dcSJérôme Glisse { 11274ef589dcSJérôme Glisse struct hmm_devmem *devmem = page->pgmap->data; 11284ef589dcSJérôme Glisse 11294ef589dcSJérôme Glisse return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp); 11304ef589dcSJérôme Glisse } 11314ef589dcSJérôme Glisse 11324ef589dcSJérôme Glisse static void hmm_devmem_free(struct page *page, void *data) 11334ef589dcSJérôme Glisse { 11344ef589dcSJérôme Glisse struct hmm_devmem *devmem = data; 11354ef589dcSJérôme Glisse 11362fa147bdSDan Williams 
10964ef589dcSJérôme Glisse
10974ef589dcSJérôme Glisse
10984ef589dcSJérôme Glisse static void hmm_devmem_ref_release(struct percpu_ref *ref)
10994ef589dcSJérôme Glisse {
11004ef589dcSJérôme Glisse 	struct hmm_devmem *devmem;
11014ef589dcSJérôme Glisse
11024ef589dcSJérôme Glisse 	devmem = container_of(ref, struct hmm_devmem, ref);
11034ef589dcSJérôme Glisse 	complete(&devmem->completion);
11044ef589dcSJérôme Glisse }
11054ef589dcSJérôme Glisse
11064ef589dcSJérôme Glisse static void hmm_devmem_ref_exit(void *data)
11074ef589dcSJérôme Glisse {
11084ef589dcSJérôme Glisse 	struct percpu_ref *ref = data;
11094ef589dcSJérôme Glisse 	struct hmm_devmem *devmem;
11104ef589dcSJérôme Glisse
11114ef589dcSJérôme Glisse 	devmem = container_of(ref, struct hmm_devmem, ref);
1112bbecd94eSDan Williams 	wait_for_completion(&devmem->completion);
11134ef589dcSJérôme Glisse 	percpu_ref_exit(ref);
11144ef589dcSJérôme Glisse }
11154ef589dcSJérôme Glisse
1116bbecd94eSDan Williams static void hmm_devmem_ref_kill(struct percpu_ref *ref)
11174ef589dcSJérôme Glisse {
11184ef589dcSJérôme Glisse 	percpu_ref_kill(ref);
11194ef589dcSJérôme Glisse }
11204ef589dcSJérôme Glisse
1121b57e622eSSouptick Joarder static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
11224ef589dcSJérôme Glisse 			unsigned long addr,
11234ef589dcSJérôme Glisse 			const struct page *page,
11244ef589dcSJérôme Glisse 			unsigned int flags,
11254ef589dcSJérôme Glisse 			pmd_t *pmdp)
11264ef589dcSJérôme Glisse {
11274ef589dcSJérôme Glisse 	struct hmm_devmem *devmem = page->pgmap->data;
11284ef589dcSJérôme Glisse
11294ef589dcSJérôme Glisse 	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
11304ef589dcSJérôme Glisse }
11314ef589dcSJérôme Glisse
11324ef589dcSJérôme Glisse static void hmm_devmem_free(struct page *page, void *data)
11334ef589dcSJérôme Glisse {
11344ef589dcSJérôme Glisse 	struct hmm_devmem *devmem = data;
11352fa147bdSDan Williams
11362fa147bdSDan Williams 	page->mapping = NULL;
11372fa147bdSDan Williams
11384ef589dcSJérôme Glisse 	devmem->ops->free(devmem, page);
11394ef589dcSJérôme Glisse }
11404ef589dcSJérôme Glisse
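/*
 * Illustrative sketch of the two callbacks a driver supplies in struct
 * hmm_devmem_ops, matching how hmm_devmem_fault() and hmm_devmem_free()
 * above dispatch to them. The my_dev_* names are hypothetical.
 *
 *	static vm_fault_t my_dev_fault(struct hmm_devmem *devmem,
 *				       struct vm_area_struct *vma,
 *				       unsigned long addr,
 *				       const struct page *page,
 *				       unsigned int flags,
 *				       pmd_t *pmdp)
 *	{
 *		migrate the device page back to system memory here and
 *		return the appropriate vm_fault_t
 *	}
 *
 *	static void my_dev_free(struct hmm_devmem *devmem, struct page *page)
 *	{
 *		put the backing device memory page back on the driver's
 *		free list
 *	}
 *
 *	static const struct hmm_devmem_ops my_dev_ops = {
 *		.fault = my_dev_fault,
 *		.free = my_dev_free,
 *	};
 */
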
11414ef589dcSJérôme Glisse /*
11424ef589dcSJérôme Glisse  * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
11434ef589dcSJérôme Glisse  *
11444ef589dcSJérôme Glisse  * @ops: memory event device driver callback (see struct hmm_devmem_ops)
11454ef589dcSJérôme Glisse  * @device: device struct to bind the resource to
11464ef589dcSJérôme Glisse  * @size: size in bytes of the device memory to add
11474ef589dcSJérôme Glisse  * Returns: pointer to new hmm_devmem struct, ERR_PTR otherwise
11484ef589dcSJérôme Glisse  *
11494ef589dcSJérôme Glisse  * This function first finds an empty range of physical addresses big enough to
11504ef589dcSJérôme Glisse  * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
11514ef589dcSJérôme Glisse  * in turn allocates struct pages. It does not do anything beyond that; all
11524ef589dcSJérôme Glisse  * events affecting the memory will go through the various callbacks provided
11534ef589dcSJérôme Glisse  * by the hmm_devmem_ops struct.
11544ef589dcSJérôme Glisse  *
11554ef589dcSJérôme Glisse  * The device driver should call this function during device initialization and
11564ef589dcSJérôme Glisse  * is then responsible for memory management. HMM only provides helpers.
11574ef589dcSJérôme Glisse  */
11584ef589dcSJérôme Glisse struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
11594ef589dcSJérôme Glisse 				  struct device *device,
11604ef589dcSJérôme Glisse 				  unsigned long size)
11614ef589dcSJérôme Glisse {
11624ef589dcSJérôme Glisse 	struct hmm_devmem *devmem;
11634ef589dcSJérôme Glisse 	resource_size_t addr;
1164bbecd94eSDan Williams 	void *result;
11654ef589dcSJérôme Glisse 	int ret;
11664ef589dcSJérôme Glisse
1167e7638488SDan Williams 	dev_pagemap_get_ops();
11684ef589dcSJérôme Glisse
116958ef15b7SDan Williams 	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
11704ef589dcSJérôme Glisse 	if (!devmem)
11714ef589dcSJérôme Glisse 		return ERR_PTR(-ENOMEM);
11724ef589dcSJérôme Glisse
11734ef589dcSJérôme Glisse 	init_completion(&devmem->completion);
11744ef589dcSJérôme Glisse 	devmem->pfn_first = -1UL;
11754ef589dcSJérôme Glisse 	devmem->pfn_last = -1UL;
11764ef589dcSJérôme Glisse 	devmem->resource = NULL;
11774ef589dcSJérôme Glisse 	devmem->device = device;
11784ef589dcSJérôme Glisse 	devmem->ops = ops;
11794ef589dcSJérôme Glisse
11804ef589dcSJérôme Glisse 	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
11814ef589dcSJérôme Glisse 			      0, GFP_KERNEL);
11824ef589dcSJérôme Glisse 	if (ret)
118358ef15b7SDan Williams 		return ERR_PTR(ret);
11844ef589dcSJérôme Glisse
118558ef15b7SDan Williams 	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
11864ef589dcSJérôme Glisse 	if (ret)
118758ef15b7SDan Williams 		return ERR_PTR(ret);
11884ef589dcSJérôme Glisse
11894ef589dcSJérôme Glisse 	size = ALIGN(size, PA_SECTION_SIZE);
11904ef589dcSJérôme Glisse 	addr = min((unsigned long)iomem_resource.end,
11914ef589dcSJérôme Glisse 		   (1UL << MAX_PHYSMEM_BITS) - 1);
11924ef589dcSJérôme Glisse 	addr = addr - size + 1UL;
11934ef589dcSJérôme Glisse
11944ef589dcSJérôme Glisse 	/*
11954ef589dcSJérôme Glisse 	 * FIXME add a new helper to quickly walk resource tree and find free
11964ef589dcSJérôme Glisse 	 * range
11974ef589dcSJérôme Glisse 	 *
11984ef589dcSJérôme Glisse 	 * FIXME what about the ioport_resource resource?
11994ef589dcSJérôme Glisse 	 */
12004ef589dcSJérôme Glisse 	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
12014ef589dcSJérôme Glisse 		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
12024ef589dcSJérôme Glisse 		if (ret != REGION_DISJOINT)
12034ef589dcSJérôme Glisse 			continue;
12044ef589dcSJérôme Glisse
12054ef589dcSJérôme Glisse 		devmem->resource = devm_request_mem_region(device, addr, size,
12064ef589dcSJérôme Glisse 							   dev_name(device));
120758ef15b7SDan Williams 		if (!devmem->resource)
120858ef15b7SDan Williams 			return ERR_PTR(-ENOMEM);
12094ef589dcSJérôme Glisse 		break;
12104ef589dcSJérôme Glisse 	}
121158ef15b7SDan Williams 	if (!devmem->resource)
121258ef15b7SDan Williams 		return ERR_PTR(-ERANGE);
12134ef589dcSJérôme Glisse
12144ef589dcSJérôme Glisse 	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
12154ef589dcSJérôme Glisse 	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
12164ef589dcSJérôme Glisse 	devmem->pfn_last = devmem->pfn_first +
12174ef589dcSJérôme Glisse 			   (resource_size(devmem->resource) >> PAGE_SHIFT);
1218063a7d1dSDan Williams 	devmem->page_fault = hmm_devmem_fault;
12194ef589dcSJérôme Glisse
1220bbecd94eSDan Williams 	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
1221bbecd94eSDan Williams 	devmem->pagemap.res = *devmem->resource;
1222bbecd94eSDan Williams 	devmem->pagemap.page_free = hmm_devmem_free;
1223bbecd94eSDan Williams 	devmem->pagemap.altmap_valid = false;
1224bbecd94eSDan Williams 	devmem->pagemap.ref = &devmem->ref;
1225bbecd94eSDan Williams 	devmem->pagemap.data = devmem;
1226bbecd94eSDan Williams 	devmem->pagemap.kill = hmm_devmem_ref_kill;
122758ef15b7SDan Williams
1228bbecd94eSDan Williams 	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
1229bbecd94eSDan Williams 	if (IS_ERR(result))
1230bbecd94eSDan Williams 		return result;
12314ef589dcSJérôme Glisse 	return devmem;
12324ef589dcSJérôme Glisse }
123302917e9fSDan Williams EXPORT_SYMBOL_GPL(hmm_devmem_add);
12344ef589dcSJérôme Glisse
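/*
 * Illustrative sketch (hypothetical driver, not from the tree) of calling
 * hmm_devmem_add() from a probe path, using an ops struct like the one
 * sketched above. MY_DEV_VRAM_SIZE and pdev are assumptions.
 *
 *	struct hmm_devmem *devmem;
 *
 *	devmem = hmm_devmem_add(&my_dev_ops, &pdev->dev, MY_DEV_VRAM_SIZE);
 *	if (IS_ERR(devmem))
 *		return PTR_ERR(devmem);
 *	remember devmem->pfn_first and devmem->pfn_last to translate between
 *	struct pages and device memory offsets later on
 *
 * Because the resource, the hmm_devmem structure and the ZONE_DEVICE mapping
 * are all device-managed, cleanup happens automatically when the device is
 * unbound; there is no matching "remove" call to make.
 */
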
1235d3df0a42SJérôme Glisse struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
1236d3df0a42SJérôme Glisse 					   struct device *device,
1237d3df0a42SJérôme Glisse 					   struct resource *res)
1238d3df0a42SJérôme Glisse {
1239d3df0a42SJérôme Glisse 	struct hmm_devmem *devmem;
1240bbecd94eSDan Williams 	void *result;
1241d3df0a42SJérôme Glisse 	int ret;
1242d3df0a42SJérôme Glisse
1243d3df0a42SJérôme Glisse 	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
1244d3df0a42SJérôme Glisse 		return ERR_PTR(-EINVAL);
1245d3df0a42SJérôme Glisse
1246e7638488SDan Williams 	dev_pagemap_get_ops();
1247d3df0a42SJérôme Glisse
124858ef15b7SDan Williams 	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
1249d3df0a42SJérôme Glisse 	if (!devmem)
1250d3df0a42SJérôme Glisse 		return ERR_PTR(-ENOMEM);
1251d3df0a42SJérôme Glisse
1252d3df0a42SJérôme Glisse 	init_completion(&devmem->completion);
1253d3df0a42SJérôme Glisse 	devmem->pfn_first = -1UL;
1254d3df0a42SJérôme Glisse 	devmem->pfn_last = -1UL;
1255d3df0a42SJérôme Glisse 	devmem->resource = res;
1256d3df0a42SJérôme Glisse 	devmem->device = device;
1257d3df0a42SJérôme Glisse 	devmem->ops = ops;
1258d3df0a42SJérôme Glisse
1259d3df0a42SJérôme Glisse 	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
1260d3df0a42SJérôme Glisse 			      0, GFP_KERNEL);
1261d3df0a42SJérôme Glisse 	if (ret)
126258ef15b7SDan Williams 		return ERR_PTR(ret);
1263d3df0a42SJérôme Glisse
126458ef15b7SDan Williams 	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
126558ef15b7SDan Williams 				       &devmem->ref);
1266d3df0a42SJérôme Glisse 	if (ret)
126758ef15b7SDan Williams 		return ERR_PTR(ret);
1268d3df0a42SJérôme Glisse
1269d3df0a42SJérôme Glisse 	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
1270d3df0a42SJérôme Glisse 	devmem->pfn_last = devmem->pfn_first +
1271d3df0a42SJérôme Glisse 			   (resource_size(devmem->resource) >> PAGE_SHIFT);
1272063a7d1dSDan Williams 	devmem->page_fault = hmm_devmem_fault;
1273d3df0a42SJérôme Glisse
1274bbecd94eSDan Williams 	devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
1275bbecd94eSDan Williams 	devmem->pagemap.res = *devmem->resource;
1276bbecd94eSDan Williams 	devmem->pagemap.page_free = hmm_devmem_free;
1277bbecd94eSDan Williams 	devmem->pagemap.altmap_valid = false;
1278bbecd94eSDan Williams 	devmem->pagemap.ref = &devmem->ref;
1279bbecd94eSDan Williams 	devmem->pagemap.data = devmem;
1280bbecd94eSDan Williams 	devmem->pagemap.kill = hmm_devmem_ref_kill;
128158ef15b7SDan Williams
1282bbecd94eSDan Williams 	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
1283bbecd94eSDan Williams 	if (IS_ERR(result))
1284bbecd94eSDan Williams 		return result;
1285d3df0a42SJérôme Glisse 	return devmem;
1286d3df0a42SJérôme Glisse }
128702917e9fSDan Williams EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);
1288d3df0a42SJérôme Glisse
12894ef589dcSJérôme Glisse /*
1290858b54daSJérôme Glisse  * A device driver that wants to handle memory for multiple devices through a
1291858b54daSJérôme Glisse  * single fake device can use hmm_device to do so. This is purely a helper
1292858b54daSJérôme Glisse  * and is not required in order to use any HMM functionality.
1293858b54daSJérôme Glisse  */
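
/*
 * Illustrative sketch (hypothetical driver) of using the fake device helper,
 * for example to provide the struct device passed to hmm_devmem_add():
 *
 *	struct hmm_device *hdev;
 *
 *	hdev = hmm_device_new(my_driver_private);
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *	use &hdev->device wherever a struct device is needed, then drop the
 *	reference with hmm_device_put(hdev) when done
 */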
1294858b54daSJérôme Glisse #define HMM_DEVICE_MAX 256
1295858b54daSJérôme Glisse
1296858b54daSJérôme Glisse static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
1297858b54daSJérôme Glisse static DEFINE_SPINLOCK(hmm_device_lock);
1298858b54daSJérôme Glisse static struct class *hmm_device_class;
1299858b54daSJérôme Glisse static dev_t hmm_device_devt;
1300858b54daSJérôme Glisse
1301858b54daSJérôme Glisse static void hmm_device_release(struct device *device)
1302858b54daSJérôme Glisse {
1303858b54daSJérôme Glisse 	struct hmm_device *hmm_device;
1304858b54daSJérôme Glisse
1305858b54daSJérôme Glisse 	hmm_device = container_of(device, struct hmm_device, device);
1306858b54daSJérôme Glisse 	spin_lock(&hmm_device_lock);
1307858b54daSJérôme Glisse 	clear_bit(hmm_device->minor, hmm_device_mask);
1308858b54daSJérôme Glisse 	spin_unlock(&hmm_device_lock);
1309858b54daSJérôme Glisse
1310858b54daSJérôme Glisse 	kfree(hmm_device);
1311858b54daSJérôme Glisse }
1312858b54daSJérôme Glisse
1313858b54daSJérôme Glisse struct hmm_device *hmm_device_new(void *drvdata)
1314858b54daSJérôme Glisse {
1315858b54daSJérôme Glisse 	struct hmm_device *hmm_device;
1316858b54daSJérôme Glisse
1317858b54daSJérôme Glisse 	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
1318858b54daSJérôme Glisse 	if (!hmm_device)
1319858b54daSJérôme Glisse 		return ERR_PTR(-ENOMEM);
1320858b54daSJérôme Glisse
1321858b54daSJérôme Glisse 	spin_lock(&hmm_device_lock);
1322858b54daSJérôme Glisse 	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
1323858b54daSJérôme Glisse 	if (hmm_device->minor >= HMM_DEVICE_MAX) {
1324858b54daSJérôme Glisse 		spin_unlock(&hmm_device_lock);
1325858b54daSJérôme Glisse 		kfree(hmm_device);
1326858b54daSJérôme Glisse 		return ERR_PTR(-EBUSY);
1327858b54daSJérôme Glisse 	}
1328858b54daSJérôme Glisse 	set_bit(hmm_device->minor, hmm_device_mask);
1329858b54daSJérôme Glisse 	spin_unlock(&hmm_device_lock);
1330858b54daSJérôme Glisse
1331858b54daSJérôme Glisse 	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
1332858b54daSJérôme Glisse 	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
1333858b54daSJérôme Glisse 					hmm_device->minor);
1334858b54daSJérôme Glisse 	hmm_device->device.release = hmm_device_release;
1335858b54daSJérôme Glisse 	dev_set_drvdata(&hmm_device->device, drvdata);
1336858b54daSJérôme Glisse 	hmm_device->device.class = hmm_device_class;
1337858b54daSJérôme Glisse 	device_initialize(&hmm_device->device);
1338858b54daSJérôme Glisse
1339858b54daSJérôme Glisse 	return hmm_device;
1340858b54daSJérôme Glisse }
1341858b54daSJérôme Glisse EXPORT_SYMBOL(hmm_device_new);
1342858b54daSJérôme Glisse
1343858b54daSJérôme Glisse void hmm_device_put(struct hmm_device *hmm_device)
1344858b54daSJérôme Glisse {
1345858b54daSJérôme Glisse 	put_device(&hmm_device->device);
1346858b54daSJérôme Glisse }
1347858b54daSJérôme Glisse EXPORT_SYMBOL(hmm_device_put);
1348858b54daSJérôme Glisse
1349858b54daSJérôme Glisse static int __init hmm_init(void)
1350858b54daSJérôme Glisse {
1351858b54daSJérôme Glisse 	int ret;
1352858b54daSJérôme Glisse
1353858b54daSJérôme Glisse 	ret = alloc_chrdev_region(&hmm_device_devt, 0,
1354858b54daSJérôme Glisse 				  HMM_DEVICE_MAX,
1355858b54daSJérôme Glisse 				  "hmm_device");
1356858b54daSJérôme Glisse 	if (ret)
1357858b54daSJérôme Glisse 		return ret;
1358858b54daSJérôme Glisse
1359858b54daSJérôme Glisse 	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
1360858b54daSJérôme Glisse 	if (IS_ERR(hmm_device_class)) {
1361858b54daSJérôme Glisse 		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
1362858b54daSJérôme Glisse 		return PTR_ERR(hmm_device_class);
1363858b54daSJérôme Glisse 	}
1364858b54daSJérôme Glisse 	return 0;
1365858b54daSJérôme Glisse }
1366858b54daSJérôme Glisse
1367858b54daSJérôme Glisse device_initcall(hmm_init);
1368df6ad698SJérôme Glisse #endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */