/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

static inline struct hmm *mm_get_hmm(struct mm_struct *mm)
{
	struct hmm *hmm = READ_ONCE(mm->hmm);

	if (hmm && kref_get_unless_zero(&hmm->kref))
		return hmm;

	return NULL;
}

/**
 * hmm_get_or_create - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 * Returns: an HMM object, either by referencing the existing
 *          (per-process) object, or by creating a new one.
 *
 * This is not intended to be used directly by device drivers. If mm already
 * has an HMM struct then this function takes a reference on it and returns
 * it. Otherwise it allocates an HMM struct, initializes it, associates it
 * with the mm and returns it.
 */
static struct hmm *hmm_get_or_create(struct mm_struct *mm)
{
	struct hmm *hmm = mm_get_hmm(mm);
	bool cleanup = false;

	if (hmm)
		return hmm;

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	init_waitqueue_head(&hmm->wq);
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	mutex_init(&hmm->lock);
	kref_init(&hmm->kref);
	hmm->notifiers = 0;
	hmm->dead = false;
	hmm->mm = mm;

	spin_lock(&mm->page_table_lock);
	if (!mm->hmm)
		mm->hmm = hmm;
	else
		cleanup = true;
	spin_unlock(&mm->page_table_lock);

	if (cleanup)
		goto error;

	/*
	 * We should only get here if we hold the mmap_sem in write mode,
	 * i.e. on registration of the first mirror through
	 * hmm_mirror_register().
	 */
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
		goto error_mm;

	return hmm;

error_mm:
	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);
error:
	kfree(hmm);
	return NULL;
}

static void hmm_free(struct kref *kref)
{
	struct hmm *hmm = container_of(kref, struct hmm, kref);
	struct mm_struct *mm = hmm->mm;

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);

	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);

	kfree(hmm);
}

static inline void hmm_put(struct hmm *hmm)
{
	kref_put(&hmm->kref, hmm_free);
}

void hmm_mm_destroy(struct mm_struct *mm)
{
	struct hmm *hmm;

	spin_lock(&mm->page_table_lock);
	hmm = mm_get_hmm(mm);
	mm->hmm = NULL;
	if (hmm) {
		hmm->mm = NULL;
		hmm->dead = true;
		spin_unlock(&mm->page_table_lock);
		hmm_put(hmm);
		return;
	}

	spin_unlock(&mm->page_table_lock);
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm *hmm = mm_get_hmm(mm);
	struct hmm_mirror *mirror;
	struct hmm_range *range;

	/* Report this HMM as dying. */
	hmm->dead = true;

	/* Wake up everyone waiting on any range. */
	mutex_lock(&hmm->lock);
	list_for_each_entry(range, &hmm->ranges, list) {
		range->valid = false;
	}
	wake_up_all(&hmm->wq);
	mutex_unlock(&hmm->lock);

	down_write(&hmm->mirrors_sem);
	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
					  list);
	while (mirror) {
		list_del_init(&mirror->list);
		if (mirror->ops->release) {
			/*
			 * Drop mirrors_sem so the callback can wait on any
			 * pending work that might itself trigger an
			 * mmu_notifier callback and thus deadlock with us.
			 */
			up_write(&hmm->mirrors_sem);
			mirror->ops->release(mirror);
			down_write(&hmm->mirrors_sem);
		}
		mirror = list_first_entry_or_null(&hmm->mirrors,
						  struct hmm_mirror, list);
	}
	up_write(&hmm->mirrors_sem);

	hmm_put(hmm);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = mm_get_hmm(nrange->mm);
	struct hmm_mirror *mirror;
	struct hmm_update update;
	struct hmm_range *range;
	int ret = 0;

	VM_BUG_ON(!hmm);

	update.start = nrange->start;
	update.end = nrange->end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = nrange->blockable;

	if (nrange->blockable)
		mutex_lock(&hmm->lock);
	else if (!mutex_trylock(&hmm->lock)) {
		ret = -EAGAIN;
		goto out;
	}
	hmm->notifiers++;
	list_for_each_entry(range, &hmm->ranges, list) {
		if (update.end < range->start || update.start >= range->end)
			continue;

		range->valid = false;
	}
	mutex_unlock(&hmm->lock);

	if (nrange->blockable)
		down_read(&hmm->mirrors_sem);
	else if (!down_read_trylock(&hmm->mirrors_sem)) {
		ret = -EAGAIN;
		goto out;
	}
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int rc;

		/*
		 * Use a local rc here: the original inner variable shadowed
		 * the outer ret, so the -EAGAIN set below was never actually
		 * returned.
		 */
		rc = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
		if (!update.blockable && rc == -EAGAIN) {
			up_read(&hmm->mirrors_sem);
			ret = -EAGAIN;
			goto out;
		}
	}
	up_read(&hmm->mirrors_sem);

out:
	hmm_put(hmm);
	return ret;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = mm_get_hmm(nrange->mm);

	VM_BUG_ON(!hmm);

	mutex_lock(&hmm->lock);
	hmm->notifiers--;
	if (!hmm->notifiers) {
		struct hmm_range *range;

		list_for_each_entry(range, &hmm->ranges, list) {
			if (range->valid)
				continue;
			range->valid = true;
		}
		wake_up_all(&hmm->wq);
	}
	mutex_unlock(&hmm->lock);

	hmm_put(hmm);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mirror->hmm = hmm_get_or_create(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and clean up.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = READ_ONCE(mirror->hmm);

	if (hmm == NULL)
		return;

	down_write(&hmm->mirrors_sem);
	list_del_init(&mirror->list);
	/* To protect us against double unregister ... */
	mirror->hmm = NULL;
	up_write(&hmm->mirrors_sem);

	hmm_put(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

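/*
 * Illustrative sketch, not part of the original file: the minimal shape of a
 * driver-side mirror built on the API above. All foo_* names are
 * hypothetical, and the hmm_mirror_ops layout is assumed to match
 * include/linux/hmm.h at this revision.
 */
struct foo_device_mirror {
	struct hmm_mirror	mirror;
	/* ... device page table state would live here ... */
};

static int foo_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
					  const struct hmm_update *update)
{
	/* Invalidate device mappings covering [update->start, update->end). */
	return 0;
}

static void foo_release(struct hmm_mirror *mirror)
{
	/* The mm is going away: stop all device access to it. */
}

static const struct hmm_mirror_ops foo_mirror_ops = {
	.sync_cpu_device_pagetables	= foo_sync_cpu_device_pagetables,
	.release			= foo_release,
};

static int __maybe_unused foo_mirror_mm(struct foo_device_mirror *dmirror,
					struct mm_struct *mm)
{
	int ret;

	dmirror->mirror.ops = &foo_mirror_ops;
	/* hmm_mirror_register() requires mmap_sem held in write mode. */
	down_write(&mm->mmap_sem);
	ret = hmm_mirror_register(&dmirror->mirror, mm);
	up_write(&mm->mmap_sem);
	return ret;
}
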
struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	bool			fault;
	bool			block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EAGAIN;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EBUSY;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}

/*
 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not?
 * @write_fault: write fault?
 * @walk: mm_walk structure
 * Returns: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns
 * true, or whenever there is no page directory covering the virtual address
 * range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i, page_size;

	hmm_vma_walk->last = addr;
	page_size = hmm_range_page_size(range);
	i = (addr - range->start) >> range->page_shift;

	for (; addr < end; addr += page_size, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (!hmm_vma_walk->fault)
		return;

	/*
	 * We not only consider the individual per-page request, we also
	 * consider the default flags requested for the range. The API can be
	 * used in two fashions. In the first one the HMM user coalesces
	 * multiple page faults into one request and sets per-pfn flags for
	 * each of those faults. In the second one the HMM user wants to
	 * pre-fault a range with specific flags. For the latter it would be
	 * a waste to have the user pre-fill the pfn array with default flags.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}

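/*
 * Illustrative sketch, not part of the original file: what the two fashions
 * described above look like from the caller side. hmm_demo_set_flags() is a
 * hypothetical helper; only the default_flags, pfn_flags_mask and flags[]
 * fields it touches come from this file.
 */
static void __maybe_unused hmm_demo_set_flags(struct hmm_range *range,
					      bool prefault_whole_range)
{
	if (prefault_whole_range) {
		/*
		 * Second fashion: fault every page of the range at least for
		 * read, without pre-filling range->pfns[] per page.
		 */
		range->default_flags = range->flags[HMM_PFN_VALID];
		range->pfn_flags_mask = 0;
	} else {
		/*
		 * First fashion: no default, the caller sets the desired
		 * flags individually in each range->pfns[] entry.
		 */
		range->default_flags = 0;
		range->pfn_flags_mask = ~0ULL;
	}
}
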
static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
#else
	/* If THP is not enabled then we should never reach this code! */
	return -EINVAL;
#endif
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry: ignore migration, handle
		 * device private memory, and report anything else as an
		 * error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_pfn_from_pfn(range, swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	} else {
		cpu_flags = pte_to_hmm_pfn_flags(range, pte);
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
				   &fault, &write_fault);
	}

	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap))
			return -EBUSY;
	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
		*pfn = range->values[HMM_PFN_SPECIAL];
		return -EFAULT;
	}

	*pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(vma->vm_mm, pmdp);
			return -EBUSY;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd_lock here: even if some other
		 * thread is splitting the huge pmd we will get that event
		 * through the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge page or a device mapping, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point it is either a
	 * valid pmd entry pointing to a pte directory or a bad pmd that we
	 * will not recover from.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() unmapped the pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage the get_dev_pagemap() optimization
		 * which will not re-take a reference on a pgmap if we already
		 * have one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

static int hmm_vma_walk_pud(pud_t *pudp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start, next;
	pmd_t *pmdp;
	pud_t pud;
	int ret;

again:
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return hmm_vma_walk_hole(start, end, walk);

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud))
			return hmm_vma_walk_hole(start, end, walk);

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault)
			return hmm_vma_walk_hole_(addr, end, fault,
						  write_fault, walk);

#ifdef CONFIG_HUGETLB_PAGE
		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
			pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		return 0;
#else
		return -EINVAL;
#endif
	}

	split_huge_pud(walk->vma, pudp, addr);
	if (pud_none(*pudp))
		goto again;

	pmdp = pmd_offset(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
		if (ret)
			return ret;
	} while (pmdp++, addr = next, addr != end);

	return 0;
}

static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long addr = start, i, pfn, mask, size, pfn_inc;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	size = 1UL << huge_page_shift(h);
	mask = size - 1;
	if (range->page_shift != PAGE_SHIFT) {
		/* Make sure we are looking at a full page. */
		if (start & mask)
			return -EINVAL;
		if (end < (start + size))
			return -EINVAL;
		pfn_inc = size >> PAGE_SHIFT;
	} else {
		pfn_inc = 1;
		size = PAGE_SIZE;
	}
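
	/*
	 * Illustrative example, not part of the original file: with 2MB huge
	 * pages and range->page_shift == 21, size is 0x200000, mask is
	 * 0x1fffff and pfn_inc is 512, so each range->pfns[] entry filled
	 * below covers one full huge page.
	 */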
	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> range->page_shift;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
	for (; addr < end; addr += size, i++, pfn += pfn_inc)
		range->pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
#else /* CONFIG_HUGETLB_PAGE */
	return -EINVAL;
#endif
}

static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 * @range: range
 * @mm: the mm struct for the range of virtual address
 * @start: start virtual address (inclusive)
 * @end: end virtual address (exclusive)
 * @page_shift: expected page shift for the range
 * Returns 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table; see include/linux/hmm.h
 */
int hmm_range_register(struct hmm_range *range,
		       struct mm_struct *mm,
		       unsigned long start,
		       unsigned long end,
		       unsigned page_shift)
{
	unsigned long mask = ((1UL << page_shift) - 1UL);

	range->valid = false;
	range->hmm = NULL;

	if ((start & mask) || (end & mask))
		return -EINVAL;
	if (start >= end)
		return -EINVAL;

	range->page_shift = page_shift;
	range->start = start;
	range->end = end;

	range->hmm = hmm_get_or_create(mm);
	if (!range->hmm)
		return -EFAULT;

	/* Check if hmm_mm_destroy() was called. */
	if (range->hmm->mm == NULL || range->hmm->dead) {
		hmm_put(range->hmm);
		return -EFAULT;
	}

	/* Initialize range to track CPU page table update */
	mutex_lock(&range->hmm->lock);

	list_add_rcu(&range->list, &range->hmm->ranges);

	/*
	 * If there are any concurrent notifiers we have to wait for them for
	 * the range to be valid (see hmm_range_wait_until_valid()).
	 */
	if (!range->hmm->notifiers)
		range->valid = true;
	mutex_unlock(&range->hmm->lock);

	return 0;
}
EXPORT_SYMBOL(hmm_range_register);

/*
 * hmm_range_unregister() - stop tracking change to CPU page table over a range
 * @range: range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
	/* Sanity check: this really should not happen. */
	if (range->hmm == NULL || range->end <= range->start)
		return;

	mutex_lock(&range->hmm->lock);
	list_del_rcu(&range->list);
	mutex_unlock(&range->hmm->lock);

	/* Drop reference taken by hmm_range_register() */
	range->valid = false;
	hmm_put(range->hmm);
	range->hmm = NULL;
}
EXPORT_SYMBOL(hmm_range_unregister);

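/*
 * Illustrative sketch, not part of the original file: the expected calling
 * sequence around hmm_range_register()/hmm_range_snapshot(). The
 * hmm_demo_snapshot() name and the 1000ms timeout are hypothetical; the
 * caller is assumed to have filled in range->pfns[], range->flags[] and
 * range->values[] beforehand, and hmm_range_wait_until_valid() is assumed to
 * match its declaration in include/linux/hmm.h at this revision.
 */
static long __maybe_unused hmm_demo_snapshot(struct hmm_range *range,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	long ret;

	ret = hmm_range_register(range, mm, start, end, PAGE_SHIFT);
	if (ret)
		return ret;

	do {
		/* Wait for any concurrent invalidation to finish. */
		hmm_range_wait_until_valid(range, 1000);

		down_read(&mm->mmap_sem);
		ret = hmm_range_snapshot(range);
		up_read(&mm->mmap_sem);
		/* -EAGAIN means the range was invalidated while walking. */
	} while (ret == -EAGAIN);

	hmm_range_unregister(range);
	return ret;
}
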
/*
 * hmm_range_snapshot() - snapshot CPU page table for a range
 * @range: range
 * Returns: number of valid pages in range->pfns[] (from range start
 *          address) on success, -EINVAL if invalid arguments, -ENOMEM if
 *          out of memory, -EPERM if invalid permission (for instance asking
 *          for write on a read-only range), -EAGAIN if you need to retry,
 *          -EFAULT if invalid (i.e. either no valid vma or it is illegal to
 *          access that range).
 *
 * This snapshots the CPU page table for a range of virtual addresses.
 * Snapshot validity is tracked by the range struct. See include/linux/hmm.h
 * for an example of how to use it.
 */
long hmm_range_snapshot(struct hmm_range *range)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;

	/* Check if hmm_mm_destroy() was called. */
	if (hmm->mm == NULL || hmm->dead)
		return -EFAULT;

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid)
			return -EAGAIN;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			struct hstate *h = hstate_vma(vma);

			if (huge_page_shift(h) != range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access, either. HMM
			 * does not support architectures that allow write
			 * without read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = false;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		walk_page_range(start, end, &mm_walk);
		start = end;
	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_snapshot);

/*
 * hmm_range_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *         mmap_sem)
 * Returns: number of valid pages in range->pfns[] (from range start
 *          address). This may be zero. If the return value is negative,
 *          then one of the following values may be returned:
 *
 *           -EINVAL: invalid arguments, or the mm or virtual address is in
 *                    an invalid vma (for instance a device file vma).
 *           -ENOMEM: Out of memory.
 *           -EPERM:  Invalid permission (for instance asking for write and
 *                    range is read only).
 *           -EAGAIN: If you need to retry and mmap_sem was dropped. This
 *                    can only happen if the block argument is false.
 *           -EBUSY:  If the range is being invalidated and you should wait
 *                    for invalidation to finish.
 *           -EFAULT: Invalid (i.e. either no valid vma or it is illegal to
 *                    access that range).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, bool block)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;
	int ret;

	/* Check if hmm_mm_destroy() was called. */
	if (hmm->mm == NULL || hmm->dead)
		return -EFAULT;

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid) {
			up_read(&hmm->mm->mmap_sem);
			return -EAGAIN;
		}

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
			    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access, either. HMM
			 * does not support architectures that allow write
			 * without read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = true;
		hmm_vma_walk.block = block;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		do {
			ret = walk_page_range(start, end, &mm_walk);
			start = hmm_vma_walk.last;

			/* Keep trying while the range is valid. */
		} while (ret == -EBUSY && range->valid);

		if (ret) {
			unsigned long i;

			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
			hmm_pfns_clear(range, &range->pfns[i],
				       hmm_vma_walk.last, range->end);
			return ret;
		}
		start = end;

	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);

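/*
 * Illustrative sketch, not part of the original file: the retry loop a
 * driver is expected to wrap around hmm_range_fault(). hmm_demo_fault() is a
 * hypothetical name; per the code above, -EAGAIN is returned with mmap_sem
 * already released, which is why it is only re-taken on retry.
 */
static long __maybe_unused hmm_demo_fault(struct hmm_range *range,
					  struct mm_struct *mm)
{
	long ret;

again:
	down_read(&mm->mmap_sem);
	ret = hmm_range_fault(range, true);
	if (ret == -EAGAIN) {
		/* The range was invalidated under us; just try again. */
		goto again;
	}
	up_read(&mm->mmap_sem);
	return ret;
}
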
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = true;
		hmm_vma_walk.block = block;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		do {
			ret = walk_page_range(start, end, &mm_walk);
			start = hmm_vma_walk.last;

			/* Keep trying while the range is valid. */
		} while (ret == -EBUSY && range->valid);

		if (ret) {
			unsigned long i;

			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
			hmm_pfns_clear(range, &range->pfns[i],
				       hmm_vma_walk.last, range->end);
			return ret;
		}
		start = end;

	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);


static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	complete(&devmem->completion);
}
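
/*
 * Illustrative sketch (not part of this file): a driver's
 * hmm_devmem_ops.fault callback might use hmm_vma_alloc_locked_page()
 * above to get a locked system page to migrate device data into.
 * mydrv_devmem_fault() and mydrv_copy_from_device() are hypothetical.
 *
 *	static vm_fault_t mydrv_devmem_fault(struct hmm_devmem *devmem,
 *					     struct vm_area_struct *vma,
 *					     unsigned long addr,
 *					     const struct page *page,
 *					     unsigned int flags, pmd_t *pmdp)
 *	{
 *		struct page *spage = hmm_vma_alloc_locked_page(vma, addr);
 *
 *		if (!spage)
 *			return VM_FAULT_SIGBUS;
 *		mydrv_copy_from_device(devmem, page, spage);
 *		(install spage in place of the device page, then unlock it)
 *		return 0;
 *	}
 */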

static void hmm_devmem_ref_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	wait_for_completion(&devmem->completion);
	percpu_ref_exit(ref);
}

static void hmm_devmem_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill(ref);
}

static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
				   unsigned long addr,
				   const struct page *page,
				   unsigned int flags,
				   pmd_t *pmdp)
{
	struct hmm_devmem *devmem = page->pgmap->data;

	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}

static void hmm_devmem_free(struct page *page, void *data)
{
	struct hmm_devmem *devmem = data;

	page->mapping = NULL;

	devmem->ops->free(devmem, page);
}

/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to the new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical addresses big enough
 * to contain the new resource, and then hotplugs it as ZONE_DEVICE memory,
 * which in turn allocates struct pages. It does not do anything beyond that;
 * all events affecting the memory will go through the various callbacks
 * provided by the hmm_devmem_ops struct.
 *
 * The device driver should call this function during device initialization
 * and is then responsible for memory management. HMM only provides helpers.
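 *
 * Example (illustrative sketch; mydrv_devmem_ops and pdev are hypothetical
 * driver code, and the 1GB size is arbitrary):
 *
 *	devmem = hmm_devmem_add(&mydrv_devmem_ops, &pdev->dev, SZ_1G);
 *	if (IS_ERR(devmem))
 *		return PTR_ERR(devmem);
 *	(device pages now span devmem->pfn_first .. devmem->pfn_last)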
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size)
{
	struct hmm_devmem *devmem;
	resource_size_t addr;
	void *result;
	int ret;

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = NULL;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
				       &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	size = ALIGN(size, PA_SECTION_SIZE);
	addr = min((unsigned long)iomem_resource.end,
		   (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = addr - size + 1UL;

	/*
	 * FIXME add a new helper to quickly walk resource tree and find free
	 * range
	 *
	 * FIXME what about ioport_resource resource ?
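	 *
	 * For now, walk the physical address space top-down in size-aligned
	 * steps and take the first range that region_intersects() reports
	 * as disjoint from every existing resource.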
	 */
	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
		if (ret != REGION_DISJOINT)
			continue;

		devmem->resource = devm_request_mem_region(device, addr, size,
							   dev_name(device));
		if (!devmem->resource)
			return ERR_PTR(-ENOMEM);
		break;
	}
	if (!devmem->resource)
		return ERR_PTR(-ERANGE);

	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add);

struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res)
{
	struct hmm_devmem *devmem;
	void *result;
	int ret;

	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
		return ERR_PTR(-EINVAL);

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = res;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
				       &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);

/*
 * A device driver that wants to handle memory from multiple devices through
 * a single fake device can use hmm_device to do so. This is purely a helper;
 * it is not needed to make use of any HMM functionality.
 */
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;

static void hmm_device_release(struct device *device)
{
	struct hmm_device *hmm_device;

	hmm_device = container_of(device, struct hmm_device, device);
	spin_lock(&hmm_device_lock);
	clear_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	kfree(hmm_device);
}

struct hmm_device *hmm_device_new(void *drvdata)
{
	struct hmm_device *hmm_device;

	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
	if (!hmm_device)
		return ERR_PTR(-ENOMEM);

	spin_lock(&hmm_device_lock);
	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
	if (hmm_device->minor >= HMM_DEVICE_MAX) {
		spin_unlock(&hmm_device_lock);
		kfree(hmm_device);
		return ERR_PTR(-EBUSY);
	}
	set_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

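	/*
	 * Set up the embedded struct device; it is torn down through
	 * hmm_device_release() once the last reference is dropped (see
	 * hmm_device_put() below).
	 */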
	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
					hmm_device->minor);
	hmm_device->device.release = hmm_device_release;
	dev_set_drvdata(&hmm_device->device, drvdata);
	hmm_device->device.class = hmm_device_class;
	device_initialize(&hmm_device->device);

	return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);

void hmm_device_put(struct hmm_device *hmm_device)
{
	put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);

static int __init hmm_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&hmm_device_devt, 0,
				  HMM_DEVICE_MAX,
				  "hmm_device");
	if (ret)
		return ret;

	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
	if (IS_ERR(hmm_device_class)) {
		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
		return PTR_ERR(hmm_device_class);
	}
	return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
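
/*
 * Example (illustrative sketch; mydrv_private is hypothetical): a driver
 * wrapping several devices' memory behind one fake device.
 *
 *	struct hmm_device *hdev;
 *
 *	hdev = hmm_device_new(mydrv_private);
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *	(use &hdev->device as the device argument to hmm_devmem_add())
 *	hmm_device_put(hdev);
 */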