1133ff0eaSJérôme Glisse /* 2133ff0eaSJérôme Glisse * Copyright 2013 Red Hat Inc. 3133ff0eaSJérôme Glisse * 4133ff0eaSJérôme Glisse * This program is free software; you can redistribute it and/or modify 5133ff0eaSJérôme Glisse * it under the terms of the GNU General Public License as published by 6133ff0eaSJérôme Glisse * the Free Software Foundation; either version 2 of the License, or 7133ff0eaSJérôme Glisse * (at your option) any later version. 8133ff0eaSJérôme Glisse * 9133ff0eaSJérôme Glisse * This program is distributed in the hope that it will be useful, 10133ff0eaSJérôme Glisse * but WITHOUT ANY WARRANTY; without even the implied warranty of 11133ff0eaSJérôme Glisse * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12133ff0eaSJérôme Glisse * GNU General Public License for more details. 13133ff0eaSJérôme Glisse * 14f813f219SJérôme Glisse * Authors: Jérôme Glisse <jglisse@redhat.com> 15133ff0eaSJérôme Glisse */ 16133ff0eaSJérôme Glisse /* 17133ff0eaSJérôme Glisse * Refer to include/linux/hmm.h for information about heterogeneous memory 18133ff0eaSJérôme Glisse * management or HMM for short. 19133ff0eaSJérôme Glisse */ 20133ff0eaSJérôme Glisse #include <linux/mm.h> 21133ff0eaSJérôme Glisse #include <linux/hmm.h> 22858b54daSJérôme Glisse #include <linux/init.h> 23da4c3c73SJérôme Glisse #include <linux/rmap.h> 24da4c3c73SJérôme Glisse #include <linux/swap.h> 25133ff0eaSJérôme Glisse #include <linux/slab.h> 26133ff0eaSJérôme Glisse #include <linux/sched.h> 274ef589dcSJérôme Glisse #include <linux/mmzone.h> 284ef589dcSJérôme Glisse #include <linux/pagemap.h> 29da4c3c73SJérôme Glisse #include <linux/swapops.h> 30da4c3c73SJérôme Glisse #include <linux/hugetlb.h> 314ef589dcSJérôme Glisse #include <linux/memremap.h> 327b2d55d2SJérôme Glisse #include <linux/jump_label.h> 33c0b12405SJérôme Glisse #include <linux/mmu_notifier.h> 344ef589dcSJérôme Glisse #include <linux/memory_hotplug.h> 354ef589dcSJérôme Glisse 364ef589dcSJérôme Glisse #define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT) 37133ff0eaSJérôme Glisse 386b368cd4SJérôme Glisse #if IS_ENABLED(CONFIG_HMM_MIRROR) 39c0b12405SJérôme Glisse static const struct mmu_notifier_ops hmm_mmu_notifier_ops; 40c0b12405SJérôme Glisse 41133ff0eaSJérôme Glisse /* 42133ff0eaSJérôme Glisse * struct hmm - HMM per mm struct 43133ff0eaSJérôme Glisse * 44133ff0eaSJérôme Glisse * @mm: mm struct this HMM struct is bound to 45da4c3c73SJérôme Glisse * @lock: lock protecting ranges list 46c0b12405SJérôme Glisse * @sequence: we track updates to the CPU page table with a sequence number 47da4c3c73SJérôme Glisse * @ranges: list of range being snapshotted 48c0b12405SJérôme Glisse * @mirrors: list of mirrors for this mm 49c0b12405SJérôme Glisse * @mmu_notifier: mmu notifier to track updates to CPU page table 50c0b12405SJérôme Glisse * @mirrors_sem: read/write semaphore protecting the mirrors list 51133ff0eaSJérôme Glisse */ 52133ff0eaSJérôme Glisse struct hmm { 53133ff0eaSJérôme Glisse struct mm_struct *mm; 54da4c3c73SJérôme Glisse spinlock_t lock; 55c0b12405SJérôme Glisse atomic_t sequence; 56da4c3c73SJérôme Glisse struct list_head ranges; 57c0b12405SJérôme Glisse struct list_head mirrors; 58c0b12405SJérôme Glisse struct mmu_notifier mmu_notifier; 59c0b12405SJérôme Glisse struct rw_semaphore mirrors_sem; 60133ff0eaSJérôme Glisse }; 61133ff0eaSJérôme Glisse 62133ff0eaSJérôme Glisse /* 63133ff0eaSJérôme Glisse * hmm_register - register HMM against an mm (HMM internal) 64133ff0eaSJérôme Glisse * 65133ff0eaSJérôme Glisse * @mm: mm struct to attach to 66133ff0eaSJérôme Glisse * 67133ff0eaSJérôme Glisse * This is not intended to be used directly by device drivers. It allocates an 68133ff0eaSJérôme Glisse * HMM struct if mm does not have one, and initializes it. 69133ff0eaSJérôme Glisse */ 70133ff0eaSJérôme Glisse static struct hmm *hmm_register(struct mm_struct *mm) 71133ff0eaSJérôme Glisse { 72c0b12405SJérôme Glisse struct hmm *hmm = READ_ONCE(mm->hmm); 73c0b12405SJérôme Glisse bool cleanup = false; 74133ff0eaSJérôme Glisse 75133ff0eaSJérôme Glisse /* 76133ff0eaSJérôme Glisse * The hmm struct can only be freed once the mm_struct goes away, 77133ff0eaSJérôme Glisse * hence we should always have pre-allocated an new hmm struct 78133ff0eaSJérôme Glisse * above. 79133ff0eaSJérôme Glisse */ 80c0b12405SJérôme Glisse if (hmm) 81c0b12405SJérôme Glisse return hmm; 82c0b12405SJérôme Glisse 83c0b12405SJérôme Glisse hmm = kmalloc(sizeof(*hmm), GFP_KERNEL); 84c0b12405SJérôme Glisse if (!hmm) 85c0b12405SJérôme Glisse return NULL; 86c0b12405SJérôme Glisse INIT_LIST_HEAD(&hmm->mirrors); 87c0b12405SJérôme Glisse init_rwsem(&hmm->mirrors_sem); 88c0b12405SJérôme Glisse atomic_set(&hmm->sequence, 0); 89c0b12405SJérôme Glisse hmm->mmu_notifier.ops = NULL; 90da4c3c73SJérôme Glisse INIT_LIST_HEAD(&hmm->ranges); 91da4c3c73SJérôme Glisse spin_lock_init(&hmm->lock); 92c0b12405SJérôme Glisse hmm->mm = mm; 93c0b12405SJérôme Glisse 94c0b12405SJérôme Glisse spin_lock(&mm->page_table_lock); 95c0b12405SJérôme Glisse if (!mm->hmm) 96c0b12405SJérôme Glisse mm->hmm = hmm; 97c0b12405SJérôme Glisse else 98c0b12405SJérôme Glisse cleanup = true; 99c0b12405SJérôme Glisse spin_unlock(&mm->page_table_lock); 100c0b12405SJérôme Glisse 10186a2d598SRalph Campbell if (cleanup) 10286a2d598SRalph Campbell goto error; 10386a2d598SRalph Campbell 10486a2d598SRalph Campbell /* 10586a2d598SRalph Campbell * We should only get here if hold the mmap_sem in write mode ie on 10686a2d598SRalph Campbell * registration of first mirror through hmm_mirror_register() 10786a2d598SRalph Campbell */ 10886a2d598SRalph Campbell hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops; 10986a2d598SRalph Campbell if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) 11086a2d598SRalph Campbell goto error_mm; 111c0b12405SJérôme Glisse 112133ff0eaSJérôme Glisse return mm->hmm; 11386a2d598SRalph Campbell 11486a2d598SRalph Campbell error_mm: 11586a2d598SRalph Campbell spin_lock(&mm->page_table_lock); 11686a2d598SRalph Campbell if (mm->hmm == hmm) 11786a2d598SRalph Campbell mm->hmm = NULL; 11886a2d598SRalph Campbell spin_unlock(&mm->page_table_lock); 11986a2d598SRalph Campbell error: 12086a2d598SRalph Campbell kfree(hmm); 12186a2d598SRalph Campbell return NULL; 122133ff0eaSJérôme Glisse } 123133ff0eaSJérôme Glisse 124133ff0eaSJérôme Glisse void hmm_mm_destroy(struct mm_struct *mm) 125133ff0eaSJérôme Glisse { 126133ff0eaSJérôme Glisse kfree(mm->hmm); 127133ff0eaSJérôme Glisse } 128c0b12405SJérôme Glisse 129c0b12405SJérôme Glisse static void hmm_invalidate_range(struct hmm *hmm, 130c0b12405SJérôme Glisse enum hmm_update_type action, 131c0b12405SJérôme Glisse unsigned long start, 132c0b12405SJérôme Glisse unsigned long end) 133c0b12405SJérôme Glisse { 134c0b12405SJérôme Glisse struct hmm_mirror *mirror; 135da4c3c73SJérôme Glisse struct hmm_range *range; 136da4c3c73SJérôme Glisse 137da4c3c73SJérôme Glisse spin_lock(&hmm->lock); 138da4c3c73SJérôme Glisse list_for_each_entry(range, &hmm->ranges, list) { 139da4c3c73SJérôme Glisse unsigned long addr, idx, npages; 140da4c3c73SJérôme Glisse 141da4c3c73SJérôme Glisse if (end < range->start || start >= range->end) 142da4c3c73SJérôme Glisse continue; 143da4c3c73SJérôme Glisse 144da4c3c73SJérôme Glisse range->valid = false; 145da4c3c73SJérôme Glisse addr = max(start, range->start); 146da4c3c73SJérôme Glisse idx = (addr - range->start) >> PAGE_SHIFT; 147da4c3c73SJérôme Glisse npages = (min(range->end, end) - addr) >> PAGE_SHIFT; 148da4c3c73SJérôme Glisse memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages); 149da4c3c73SJérôme Glisse } 150da4c3c73SJérôme Glisse spin_unlock(&hmm->lock); 151c0b12405SJérôme Glisse 152c0b12405SJérôme Glisse down_read(&hmm->mirrors_sem); 153c0b12405SJérôme Glisse list_for_each_entry(mirror, &hmm->mirrors, list) 154c0b12405SJérôme Glisse mirror->ops->sync_cpu_device_pagetables(mirror, action, 155c0b12405SJérôme Glisse start, end); 156c0b12405SJérôme Glisse up_read(&hmm->mirrors_sem); 157c0b12405SJérôme Glisse } 158c0b12405SJérôme Glisse 159e1401513SRalph Campbell static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm) 160e1401513SRalph Campbell { 161e1401513SRalph Campbell struct hmm_mirror *mirror; 162e1401513SRalph Campbell struct hmm *hmm = mm->hmm; 163e1401513SRalph Campbell 164e1401513SRalph Campbell down_write(&hmm->mirrors_sem); 165e1401513SRalph Campbell mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror, 166e1401513SRalph Campbell list); 167e1401513SRalph Campbell while (mirror) { 168e1401513SRalph Campbell list_del_init(&mirror->list); 169e1401513SRalph Campbell if (mirror->ops->release) { 170e1401513SRalph Campbell /* 171e1401513SRalph Campbell * Drop mirrors_sem so callback can wait on any pending 172e1401513SRalph Campbell * work that might itself trigger mmu_notifier callback 173e1401513SRalph Campbell * and thus would deadlock with us. 174e1401513SRalph Campbell */ 175e1401513SRalph Campbell up_write(&hmm->mirrors_sem); 176e1401513SRalph Campbell mirror->ops->release(mirror); 177e1401513SRalph Campbell down_write(&hmm->mirrors_sem); 178e1401513SRalph Campbell } 179e1401513SRalph Campbell mirror = list_first_entry_or_null(&hmm->mirrors, 180e1401513SRalph Campbell struct hmm_mirror, list); 181e1401513SRalph Campbell } 182e1401513SRalph Campbell up_write(&hmm->mirrors_sem); 183e1401513SRalph Campbell } 184e1401513SRalph Campbell 18593065ac7SMichal Hocko static int hmm_invalidate_range_start(struct mmu_notifier *mn, 186c0b12405SJérôme Glisse struct mm_struct *mm, 187c0b12405SJérôme Glisse unsigned long start, 18893065ac7SMichal Hocko unsigned long end, 18993065ac7SMichal Hocko bool blockable) 190c0b12405SJérôme Glisse { 191c0b12405SJérôme Glisse struct hmm *hmm = mm->hmm; 192c0b12405SJérôme Glisse 193c0b12405SJérôme Glisse VM_BUG_ON(!hmm); 194c0b12405SJérôme Glisse 195c0b12405SJérôme Glisse atomic_inc(&hmm->sequence); 19693065ac7SMichal Hocko 19793065ac7SMichal Hocko return 0; 198c0b12405SJérôme Glisse } 199c0b12405SJérôme Glisse 200c0b12405SJérôme Glisse static void hmm_invalidate_range_end(struct mmu_notifier *mn, 201c0b12405SJérôme Glisse struct mm_struct *mm, 202c0b12405SJérôme Glisse unsigned long start, 203c0b12405SJérôme Glisse unsigned long end) 204c0b12405SJérôme Glisse { 205c0b12405SJérôme Glisse struct hmm *hmm = mm->hmm; 206c0b12405SJérôme Glisse 207c0b12405SJérôme Glisse VM_BUG_ON(!hmm); 208c0b12405SJérôme Glisse 209c0b12405SJérôme Glisse hmm_invalidate_range(mm->hmm, HMM_UPDATE_INVALIDATE, start, end); 210c0b12405SJérôme Glisse } 211c0b12405SJérôme Glisse 212c0b12405SJérôme Glisse static const struct mmu_notifier_ops hmm_mmu_notifier_ops = { 213e1401513SRalph Campbell .release = hmm_release, 214c0b12405SJérôme Glisse .invalidate_range_start = hmm_invalidate_range_start, 215c0b12405SJérôme Glisse .invalidate_range_end = hmm_invalidate_range_end, 216c0b12405SJérôme Glisse }; 217c0b12405SJérôme Glisse 218c0b12405SJérôme Glisse /* 219c0b12405SJérôme Glisse * hmm_mirror_register() - register a mirror against an mm 220c0b12405SJérôme Glisse * 221c0b12405SJérôme Glisse * @mirror: new mirror struct to register 222c0b12405SJérôme Glisse * @mm: mm to register against 223c0b12405SJérôme Glisse * 224c0b12405SJérôme Glisse * To start mirroring a process address space, the device driver must register 225c0b12405SJérôme Glisse * an HMM mirror struct. 226c0b12405SJérôme Glisse * 227c0b12405SJérôme Glisse * THE mm->mmap_sem MUST BE HELD IN WRITE MODE ! 228c0b12405SJérôme Glisse */ 229c0b12405SJérôme Glisse int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm) 230c0b12405SJérôme Glisse { 231c0b12405SJérôme Glisse /* Sanity check */ 232c0b12405SJérôme Glisse if (!mm || !mirror || !mirror->ops) 233c0b12405SJérôme Glisse return -EINVAL; 234c0b12405SJérôme Glisse 235c01cbba2SJérôme Glisse again: 236c0b12405SJérôme Glisse mirror->hmm = hmm_register(mm); 237c0b12405SJérôme Glisse if (!mirror->hmm) 238c0b12405SJérôme Glisse return -ENOMEM; 239c0b12405SJérôme Glisse 240c0b12405SJérôme Glisse down_write(&mirror->hmm->mirrors_sem); 241c01cbba2SJérôme Glisse if (mirror->hmm->mm == NULL) { 242c01cbba2SJérôme Glisse /* 243c01cbba2SJérôme Glisse * A racing hmm_mirror_unregister() is about to destroy the hmm 244c01cbba2SJérôme Glisse * struct. Try again to allocate a new one. 245c01cbba2SJérôme Glisse */ 246c01cbba2SJérôme Glisse up_write(&mirror->hmm->mirrors_sem); 247c01cbba2SJérôme Glisse mirror->hmm = NULL; 248c01cbba2SJérôme Glisse goto again; 249c01cbba2SJérôme Glisse } else { 250c0b12405SJérôme Glisse list_add(&mirror->list, &mirror->hmm->mirrors); 251c0b12405SJérôme Glisse up_write(&mirror->hmm->mirrors_sem); 252c01cbba2SJérôme Glisse } 253c0b12405SJérôme Glisse 254c0b12405SJérôme Glisse return 0; 255c0b12405SJérôme Glisse } 256c0b12405SJérôme Glisse EXPORT_SYMBOL(hmm_mirror_register); 257c0b12405SJérôme Glisse 258c0b12405SJérôme Glisse /* 259c0b12405SJérôme Glisse * hmm_mirror_unregister() - unregister a mirror 260c0b12405SJérôme Glisse * 261c0b12405SJérôme Glisse * @mirror: new mirror struct to register 262c0b12405SJérôme Glisse * 263c0b12405SJérôme Glisse * Stop mirroring a process address space, and cleanup. 264c0b12405SJérôme Glisse */ 265c0b12405SJérôme Glisse void hmm_mirror_unregister(struct hmm_mirror *mirror) 266c0b12405SJérôme Glisse { 267c01cbba2SJérôme Glisse bool should_unregister = false; 268c01cbba2SJérôme Glisse struct mm_struct *mm; 269c01cbba2SJérôme Glisse struct hmm *hmm; 270c0b12405SJérôme Glisse 271c01cbba2SJérôme Glisse if (mirror->hmm == NULL) 272c01cbba2SJérôme Glisse return; 273c01cbba2SJérôme Glisse 274c01cbba2SJérôme Glisse hmm = mirror->hmm; 275c0b12405SJérôme Glisse down_write(&hmm->mirrors_sem); 276e1401513SRalph Campbell list_del_init(&mirror->list); 277c01cbba2SJérôme Glisse should_unregister = list_empty(&hmm->mirrors); 278c01cbba2SJérôme Glisse mirror->hmm = NULL; 279c01cbba2SJérôme Glisse mm = hmm->mm; 280c01cbba2SJérôme Glisse hmm->mm = NULL; 281c0b12405SJérôme Glisse up_write(&hmm->mirrors_sem); 282c01cbba2SJérôme Glisse 283c01cbba2SJérôme Glisse if (!should_unregister || mm == NULL) 284c01cbba2SJérôme Glisse return; 285c01cbba2SJérôme Glisse 28686a2d598SRalph Campbell mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm); 28786a2d598SRalph Campbell 288c01cbba2SJérôme Glisse spin_lock(&mm->page_table_lock); 289c01cbba2SJérôme Glisse if (mm->hmm == hmm) 290c01cbba2SJérôme Glisse mm->hmm = NULL; 291c01cbba2SJérôme Glisse spin_unlock(&mm->page_table_lock); 292c01cbba2SJérôme Glisse 293c01cbba2SJérôme Glisse kfree(hmm); 294c0b12405SJérôme Glisse } 295c0b12405SJérôme Glisse EXPORT_SYMBOL(hmm_mirror_unregister); 296da4c3c73SJérôme Glisse 29774eee180SJérôme Glisse struct hmm_vma_walk { 29874eee180SJérôme Glisse struct hmm_range *range; 29974eee180SJérôme Glisse unsigned long last; 30074eee180SJérôme Glisse bool fault; 30174eee180SJérôme Glisse bool block; 30274eee180SJérôme Glisse }; 30374eee180SJérôme Glisse 3042aee09d8SJérôme Glisse static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr, 3052aee09d8SJérôme Glisse bool write_fault, uint64_t *pfn) 30674eee180SJérôme Glisse { 30774eee180SJérôme Glisse unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE; 30874eee180SJérôme Glisse struct hmm_vma_walk *hmm_vma_walk = walk->private; 309f88a1e90SJérôme Glisse struct hmm_range *range = hmm_vma_walk->range; 31074eee180SJérôme Glisse struct vm_area_struct *vma = walk->vma; 31150a7ca3cSSouptick Joarder vm_fault_t ret; 31274eee180SJérôme Glisse 31374eee180SJérôme Glisse flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY; 3142aee09d8SJérôme Glisse flags |= write_fault ? FAULT_FLAG_WRITE : 0; 31550a7ca3cSSouptick Joarder ret = handle_mm_fault(vma, addr, flags); 31650a7ca3cSSouptick Joarder if (ret & VM_FAULT_RETRY) 31774eee180SJérôme Glisse return -EBUSY; 31850a7ca3cSSouptick Joarder if (ret & VM_FAULT_ERROR) { 319f88a1e90SJérôme Glisse *pfn = range->values[HMM_PFN_ERROR]; 32074eee180SJérôme Glisse return -EFAULT; 32174eee180SJérôme Glisse } 32274eee180SJérôme Glisse 32374eee180SJérôme Glisse return -EAGAIN; 32474eee180SJérôme Glisse } 32574eee180SJérôme Glisse 326da4c3c73SJérôme Glisse static int hmm_pfns_bad(unsigned long addr, 327da4c3c73SJérôme Glisse unsigned long end, 328da4c3c73SJérôme Glisse struct mm_walk *walk) 329da4c3c73SJérôme Glisse { 330c719547fSJérôme Glisse struct hmm_vma_walk *hmm_vma_walk = walk->private; 331c719547fSJérôme Glisse struct hmm_range *range = hmm_vma_walk->range; 332ff05c0c6SJérôme Glisse uint64_t *pfns = range->pfns; 333da4c3c73SJérôme Glisse unsigned long i; 334da4c3c73SJérôme Glisse 335da4c3c73SJérôme Glisse i = (addr - range->start) >> PAGE_SHIFT; 336da4c3c73SJérôme Glisse for (; addr < end; addr += PAGE_SIZE, i++) 337f88a1e90SJérôme Glisse pfns[i] = range->values[HMM_PFN_ERROR]; 338da4c3c73SJérôme Glisse 339da4c3c73SJérôme Glisse return 0; 340da4c3c73SJérôme Glisse } 341da4c3c73SJérôme Glisse 3425504ed29SJérôme Glisse /* 3435504ed29SJérôme Glisse * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s) 3445504ed29SJérôme Glisse * @start: range virtual start address (inclusive) 3455504ed29SJérôme Glisse * @end: range virtual end address (exclusive) 3462aee09d8SJérôme Glisse * @fault: should we fault or not ? 3472aee09d8SJérôme Glisse * @write_fault: write fault ? 3485504ed29SJérôme Glisse * @walk: mm_walk structure 3495504ed29SJérôme Glisse * Returns: 0 on success, -EAGAIN after page fault, or page fault error 3505504ed29SJérôme Glisse * 3515504ed29SJérôme Glisse * This function will be called whenever pmd_none() or pte_none() returns true, 3525504ed29SJérôme Glisse * or whenever there is no page directory covering the virtual address range. 3535504ed29SJérôme Glisse */ 3542aee09d8SJérôme Glisse static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end, 3552aee09d8SJérôme Glisse bool fault, bool write_fault, 356da4c3c73SJérôme Glisse struct mm_walk *walk) 357da4c3c73SJérôme Glisse { 35874eee180SJérôme Glisse struct hmm_vma_walk *hmm_vma_walk = walk->private; 35974eee180SJérôme Glisse struct hmm_range *range = hmm_vma_walk->range; 360ff05c0c6SJérôme Glisse uint64_t *pfns = range->pfns; 361da4c3c73SJérôme Glisse unsigned long i; 362da4c3c73SJérôme Glisse 36374eee180SJérôme Glisse hmm_vma_walk->last = addr; 364da4c3c73SJérôme Glisse i = (addr - range->start) >> PAGE_SHIFT; 36574eee180SJérôme Glisse for (; addr < end; addr += PAGE_SIZE, i++) { 366f88a1e90SJérôme Glisse pfns[i] = range->values[HMM_PFN_NONE]; 3672aee09d8SJérôme Glisse if (fault || write_fault) { 36874eee180SJérôme Glisse int ret; 369da4c3c73SJérôme Glisse 3702aee09d8SJérôme Glisse ret = hmm_vma_do_fault(walk, addr, write_fault, 3712aee09d8SJérôme Glisse &pfns[i]); 37274eee180SJérôme Glisse if (ret != -EAGAIN) 37374eee180SJérôme Glisse return ret; 37474eee180SJérôme Glisse } 37574eee180SJérôme Glisse } 37674eee180SJérôme Glisse 3772aee09d8SJérôme Glisse return (fault || write_fault) ? -EAGAIN : 0; 3782aee09d8SJérôme Glisse } 3792aee09d8SJérôme Glisse 3802aee09d8SJérôme Glisse static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk, 3812aee09d8SJérôme Glisse uint64_t pfns, uint64_t cpu_flags, 3822aee09d8SJérôme Glisse bool *fault, bool *write_fault) 3832aee09d8SJérôme Glisse { 384f88a1e90SJérôme Glisse struct hmm_range *range = hmm_vma_walk->range; 385f88a1e90SJérôme Glisse 3862aee09d8SJérôme Glisse *fault = *write_fault = false; 3872aee09d8SJérôme Glisse if (!hmm_vma_walk->fault) 3882aee09d8SJérôme Glisse return; 3892aee09d8SJérôme Glisse 3902aee09d8SJérôme Glisse /* We aren't ask to do anything ... */ 391f88a1e90SJérôme Glisse if (!(pfns & range->flags[HMM_PFN_VALID])) 3922aee09d8SJérôme Glisse return; 393f88a1e90SJérôme Glisse /* If this is device memory than only fault if explicitly requested */ 394f88a1e90SJérôme Glisse if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) { 395f88a1e90SJérôme Glisse /* Do we fault on device memory ? */ 396f88a1e90SJérôme Glisse if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) { 397f88a1e90SJérôme Glisse *write_fault = pfns & range->flags[HMM_PFN_WRITE]; 398f88a1e90SJérôme Glisse *fault = true; 399f88a1e90SJérôme Glisse } 4002aee09d8SJérôme Glisse return; 4012aee09d8SJérôme Glisse } 402f88a1e90SJérôme Glisse 403f88a1e90SJérôme Glisse /* If CPU page table is not valid then we need to fault */ 404f88a1e90SJérôme Glisse *fault = !(cpu_flags & range->flags[HMM_PFN_VALID]); 405f88a1e90SJérôme Glisse /* Need to write fault ? */ 406f88a1e90SJérôme Glisse if ((pfns & range->flags[HMM_PFN_WRITE]) && 407f88a1e90SJérôme Glisse !(cpu_flags & range->flags[HMM_PFN_WRITE])) { 408f88a1e90SJérôme Glisse *write_fault = true; 4092aee09d8SJérôme Glisse *fault = true; 4102aee09d8SJérôme Glisse } 4112aee09d8SJérôme Glisse } 4122aee09d8SJérôme Glisse 4132aee09d8SJérôme Glisse static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk, 4142aee09d8SJérôme Glisse const uint64_t *pfns, unsigned long npages, 4152aee09d8SJérôme Glisse uint64_t cpu_flags, bool *fault, 4162aee09d8SJérôme Glisse bool *write_fault) 4172aee09d8SJérôme Glisse { 4182aee09d8SJérôme Glisse unsigned long i; 4192aee09d8SJérôme Glisse 4202aee09d8SJérôme Glisse if (!hmm_vma_walk->fault) { 4212aee09d8SJérôme Glisse *fault = *write_fault = false; 4222aee09d8SJérôme Glisse return; 4232aee09d8SJérôme Glisse } 4242aee09d8SJérôme Glisse 4252aee09d8SJérôme Glisse for (i = 0; i < npages; ++i) { 4262aee09d8SJérôme Glisse hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags, 4272aee09d8SJérôme Glisse fault, write_fault); 4282aee09d8SJérôme Glisse if ((*fault) || (*write_fault)) 4292aee09d8SJérôme Glisse return; 4302aee09d8SJérôme Glisse } 4312aee09d8SJérôme Glisse } 4322aee09d8SJérôme Glisse 4332aee09d8SJérôme Glisse static int hmm_vma_walk_hole(unsigned long addr, unsigned long end, 4342aee09d8SJérôme Glisse struct mm_walk *walk) 4352aee09d8SJérôme Glisse { 4362aee09d8SJérôme Glisse struct hmm_vma_walk *hmm_vma_walk = walk->private; 4372aee09d8SJérôme Glisse struct hmm_range *range = hmm_vma_walk->range; 4382aee09d8SJérôme Glisse bool fault, write_fault; 4392aee09d8SJérôme Glisse unsigned long i, npages; 4402aee09d8SJérôme Glisse uint64_t *pfns; 4412aee09d8SJérôme Glisse 4422aee09d8SJérôme Glisse i = (addr - range->start) >> PAGE_SHIFT; 4432aee09d8SJérôme Glisse npages = (end - addr) >> PAGE_SHIFT; 4442aee09d8SJérôme Glisse pfns = &range->pfns[i]; 4452aee09d8SJérôme Glisse hmm_range_need_fault(hmm_vma_walk, pfns, npages, 4462aee09d8SJérôme Glisse 0, &fault, &write_fault); 4472aee09d8SJérôme Glisse return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk); 4482aee09d8SJérôme Glisse } 4492aee09d8SJérôme Glisse 450f88a1e90SJérôme Glisse static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd) 4512aee09d8SJérôme Glisse { 4522aee09d8SJérôme Glisse if (pmd_protnone(pmd)) 4532aee09d8SJérôme Glisse return 0; 454f88a1e90SJérôme Glisse return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] | 455f88a1e90SJérôme Glisse range->flags[HMM_PFN_WRITE] : 456f88a1e90SJérôme Glisse range->flags[HMM_PFN_VALID]; 457da4c3c73SJérôme Glisse } 458da4c3c73SJérôme Glisse 45953f5c3f4SJérôme Glisse static int hmm_vma_handle_pmd(struct mm_walk *walk, 46053f5c3f4SJérôme Glisse unsigned long addr, 46153f5c3f4SJérôme Glisse unsigned long end, 46253f5c3f4SJérôme Glisse uint64_t *pfns, 46353f5c3f4SJérôme Glisse pmd_t pmd) 46453f5c3f4SJérôme Glisse { 46553f5c3f4SJérôme Glisse struct hmm_vma_walk *hmm_vma_walk = walk->private; 466f88a1e90SJérôme Glisse struct hmm_range *range = hmm_vma_walk->range; 4672aee09d8SJérôme Glisse unsigned long pfn, npages, i; 4682aee09d8SJérôme Glisse bool fault, write_fault; 469f88a1e90SJérôme Glisse uint64_t cpu_flags; 47053f5c3f4SJérôme Glisse 4712aee09d8SJérôme Glisse npages = (end - addr) >> PAGE_SHIFT; 472f88a1e90SJérôme Glisse cpu_flags = pmd_to_hmm_pfn_flags(range, pmd); 4732aee09d8SJérôme Glisse hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags, 4742aee09d8SJérôme Glisse &fault, &write_fault); 47553f5c3f4SJérôme Glisse 4762aee09d8SJérôme Glisse if (pmd_protnone(pmd) || fault || write_fault) 4772aee09d8SJérôme Glisse return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk); 47853f5c3f4SJérôme Glisse 47953f5c3f4SJérôme Glisse pfn = pmd_pfn(pmd) + pte_index(addr); 48053f5c3f4SJérôme Glisse for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) 481f88a1e90SJérôme Glisse pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags; 48253f5c3f4SJérôme Glisse hmm_vma_walk->last = end; 48353f5c3f4SJérôme Glisse return 0; 48453f5c3f4SJérôme Glisse } 48553f5c3f4SJérôme Glisse 486f88a1e90SJérôme Glisse static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte) 4872aee09d8SJérôme Glisse { 4882aee09d8SJérôme Glisse if (pte_none(pte) || !pte_present(pte)) 4892aee09d8SJérôme Glisse return 0; 490f88a1e90SJérôme Glisse return pte_write(pte) ? range->flags[HMM_PFN_VALID] | 491f88a1e90SJérôme Glisse range->flags[HMM_PFN_WRITE] : 492f88a1e90SJérôme Glisse range->flags[HMM_PFN_VALID]; 4932aee09d8SJérôme Glisse } 4942aee09d8SJérôme Glisse 49553f5c3f4SJérôme Glisse static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, 49653f5c3f4SJérôme Glisse unsigned long end, pmd_t *pmdp, pte_t *ptep, 49753f5c3f4SJérôme Glisse uint64_t *pfn) 49853f5c3f4SJérôme Glisse { 49953f5c3f4SJérôme Glisse struct hmm_vma_walk *hmm_vma_walk = walk->private; 500f88a1e90SJérôme Glisse struct hmm_range *range = hmm_vma_walk->range; 50153f5c3f4SJérôme Glisse struct vm_area_struct *vma = walk->vma; 5022aee09d8SJérôme Glisse bool fault, write_fault; 5032aee09d8SJérôme Glisse uint64_t cpu_flags; 50453f5c3f4SJérôme Glisse pte_t pte = *ptep; 505f88a1e90SJérôme Glisse uint64_t orig_pfn = *pfn; 50653f5c3f4SJérôme Glisse 507f88a1e90SJérôme Glisse *pfn = range->values[HMM_PFN_NONE]; 508f88a1e90SJérôme Glisse cpu_flags = pte_to_hmm_pfn_flags(range, pte); 509f88a1e90SJérôme Glisse hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags, 5102aee09d8SJérôme Glisse &fault, &write_fault); 51153f5c3f4SJérôme Glisse 51253f5c3f4SJérôme Glisse if (pte_none(pte)) { 5132aee09d8SJérôme Glisse if (fault || write_fault) 51453f5c3f4SJérôme Glisse goto fault; 51553f5c3f4SJérôme Glisse return 0; 51653f5c3f4SJérôme Glisse } 51753f5c3f4SJérôme Glisse 51853f5c3f4SJérôme Glisse if (!pte_present(pte)) { 51953f5c3f4SJérôme Glisse swp_entry_t entry = pte_to_swp_entry(pte); 52053f5c3f4SJérôme Glisse 52153f5c3f4SJérôme Glisse if (!non_swap_entry(entry)) { 5222aee09d8SJérôme Glisse if (fault || write_fault) 52353f5c3f4SJérôme Glisse goto fault; 52453f5c3f4SJérôme Glisse return 0; 52553f5c3f4SJérôme Glisse } 52653f5c3f4SJérôme Glisse 52753f5c3f4SJérôme Glisse /* 52853f5c3f4SJérôme Glisse * This is a special swap entry, ignore migration, use 52953f5c3f4SJérôme Glisse * device and report anything else as error. 53053f5c3f4SJérôme Glisse */ 53153f5c3f4SJérôme Glisse if (is_device_private_entry(entry)) { 532f88a1e90SJérôme Glisse cpu_flags = range->flags[HMM_PFN_VALID] | 533f88a1e90SJérôme Glisse range->flags[HMM_PFN_DEVICE_PRIVATE]; 5342aee09d8SJérôme Glisse cpu_flags |= is_write_device_private_entry(entry) ? 535f88a1e90SJérôme Glisse range->flags[HMM_PFN_WRITE] : 0; 536f88a1e90SJérôme Glisse hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags, 537f88a1e90SJérôme Glisse &fault, &write_fault); 538f88a1e90SJérôme Glisse if (fault || write_fault) 539f88a1e90SJérôme Glisse goto fault; 540f88a1e90SJérôme Glisse *pfn = hmm_pfn_from_pfn(range, swp_offset(entry)); 541f88a1e90SJérôme Glisse *pfn |= cpu_flags; 54253f5c3f4SJérôme Glisse return 0; 54353f5c3f4SJérôme Glisse } 54453f5c3f4SJérôme Glisse 54553f5c3f4SJérôme Glisse if (is_migration_entry(entry)) { 5462aee09d8SJérôme Glisse if (fault || write_fault) { 54753f5c3f4SJérôme Glisse pte_unmap(ptep); 54853f5c3f4SJérôme Glisse hmm_vma_walk->last = addr; 54953f5c3f4SJérôme Glisse migration_entry_wait(vma->vm_mm, 55053f5c3f4SJérôme Glisse pmdp, addr); 55153f5c3f4SJérôme Glisse return -EAGAIN; 55253f5c3f4SJérôme Glisse } 55353f5c3f4SJérôme Glisse return 0; 55453f5c3f4SJérôme Glisse } 55553f5c3f4SJérôme Glisse 55653f5c3f4SJérôme Glisse /* Report error for everything else */ 557f88a1e90SJérôme Glisse *pfn = range->values[HMM_PFN_ERROR]; 55853f5c3f4SJérôme Glisse return -EFAULT; 55953f5c3f4SJérôme Glisse } 56053f5c3f4SJérôme Glisse 5612aee09d8SJérôme Glisse if (fault || write_fault) 56253f5c3f4SJérôme Glisse goto fault; 56353f5c3f4SJérôme Glisse 564f88a1e90SJérôme Glisse *pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags; 56553f5c3f4SJérôme Glisse return 0; 56653f5c3f4SJérôme Glisse 56753f5c3f4SJérôme Glisse fault: 56853f5c3f4SJérôme Glisse pte_unmap(ptep); 56953f5c3f4SJérôme Glisse /* Fault any virtual address we were asked to fault */ 5702aee09d8SJérôme Glisse return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk); 57153f5c3f4SJérôme Glisse } 57253f5c3f4SJérôme Glisse 573da4c3c73SJérôme Glisse static int hmm_vma_walk_pmd(pmd_t *pmdp, 574da4c3c73SJérôme Glisse unsigned long start, 575da4c3c73SJérôme Glisse unsigned long end, 576da4c3c73SJérôme Glisse struct mm_walk *walk) 577da4c3c73SJérôme Glisse { 57874eee180SJérôme Glisse struct hmm_vma_walk *hmm_vma_walk = walk->private; 57974eee180SJérôme Glisse struct hmm_range *range = hmm_vma_walk->range; 580*d08faca0SJérôme Glisse struct vm_area_struct *vma = walk->vma; 581ff05c0c6SJérôme Glisse uint64_t *pfns = range->pfns; 582da4c3c73SJérôme Glisse unsigned long addr = start, i; 583da4c3c73SJérôme Glisse pte_t *ptep; 584da4c3c73SJérôme Glisse pmd_t pmd; 585da4c3c73SJérôme Glisse 586*d08faca0SJérôme Glisse 587*d08faca0SJérôme Glisse again: 588*d08faca0SJérôme Glisse pmd = READ_ONCE(*pmdp); 589*d08faca0SJérôme Glisse if (pmd_none(pmd)) 590*d08faca0SJérôme Glisse return hmm_vma_walk_hole(start, end, walk); 591*d08faca0SJérôme Glisse 592*d08faca0SJérôme Glisse if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB)) 593*d08faca0SJérôme Glisse return hmm_pfns_bad(start, end, walk); 594*d08faca0SJérôme Glisse 595*d08faca0SJérôme Glisse if (thp_migration_supported() && is_pmd_migration_entry(pmd)) { 596*d08faca0SJérôme Glisse bool fault, write_fault; 597*d08faca0SJérôme Glisse unsigned long npages; 598*d08faca0SJérôme Glisse uint64_t *pfns; 599*d08faca0SJérôme Glisse 600*d08faca0SJérôme Glisse i = (addr - range->start) >> PAGE_SHIFT; 601*d08faca0SJérôme Glisse npages = (end - addr) >> PAGE_SHIFT; 602*d08faca0SJérôme Glisse pfns = &range->pfns[i]; 603*d08faca0SJérôme Glisse 604*d08faca0SJérôme Glisse hmm_range_need_fault(hmm_vma_walk, pfns, npages, 605*d08faca0SJérôme Glisse 0, &fault, &write_fault); 606*d08faca0SJérôme Glisse if (fault || write_fault) { 607*d08faca0SJérôme Glisse hmm_vma_walk->last = addr; 608*d08faca0SJérôme Glisse pmd_migration_entry_wait(vma->vm_mm, pmdp); 609*d08faca0SJérôme Glisse return -EAGAIN; 610*d08faca0SJérôme Glisse } 611*d08faca0SJérôme Glisse return 0; 612*d08faca0SJérôme Glisse } else if (!pmd_present(pmd)) 613*d08faca0SJérôme Glisse return hmm_pfns_bad(start, end, walk); 614*d08faca0SJérôme Glisse 615*d08faca0SJérôme Glisse if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) { 616da4c3c73SJérôme Glisse /* 617da4c3c73SJérôme Glisse * No need to take pmd_lock here, even if some other threads 618da4c3c73SJérôme Glisse * is splitting the huge pmd we will get that event through 619da4c3c73SJérôme Glisse * mmu_notifier callback. 620da4c3c73SJérôme Glisse * 621da4c3c73SJérôme Glisse * So just read pmd value and check again its a transparent 622da4c3c73SJérôme Glisse * huge or device mapping one and compute corresponding pfn 623da4c3c73SJérôme Glisse * values. 624da4c3c73SJérôme Glisse */ 625da4c3c73SJérôme Glisse pmd = pmd_read_atomic(pmdp); 626da4c3c73SJérôme Glisse barrier(); 627da4c3c73SJérôme Glisse if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd)) 628da4c3c73SJérôme Glisse goto again; 629da4c3c73SJérôme Glisse 630*d08faca0SJérôme Glisse i = (addr - range->start) >> PAGE_SHIFT; 63153f5c3f4SJérôme Glisse return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd); 632da4c3c73SJérôme Glisse } 633da4c3c73SJérôme Glisse 634*d08faca0SJérôme Glisse /* 635*d08faca0SJérôme Glisse * We have handled all the valid case above ie either none, migration, 636*d08faca0SJérôme Glisse * huge or transparent huge. At this point either it is a valid pmd 637*d08faca0SJérôme Glisse * entry pointing to pte directory or it is a bad pmd that will not 638*d08faca0SJérôme Glisse * recover. 639*d08faca0SJérôme Glisse */ 640*d08faca0SJérôme Glisse if (pmd_bad(pmd)) 641da4c3c73SJérôme Glisse return hmm_pfns_bad(start, end, walk); 642da4c3c73SJérôme Glisse 643da4c3c73SJérôme Glisse ptep = pte_offset_map(pmdp, addr); 644*d08faca0SJérôme Glisse i = (addr - range->start) >> PAGE_SHIFT; 645da4c3c73SJérôme Glisse for (; addr < end; addr += PAGE_SIZE, ptep++, i++) { 64653f5c3f4SJérôme Glisse int r; 647da4c3c73SJérôme Glisse 64853f5c3f4SJérôme Glisse r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]); 64953f5c3f4SJérôme Glisse if (r) { 65053f5c3f4SJérôme Glisse /* hmm_vma_handle_pte() did unmap pte directory */ 65174eee180SJérôme Glisse hmm_vma_walk->last = addr; 65253f5c3f4SJérôme Glisse return r; 65374eee180SJérôme Glisse } 654da4c3c73SJérôme Glisse } 655da4c3c73SJérôme Glisse pte_unmap(ptep - 1); 656da4c3c73SJérôme Glisse 65753f5c3f4SJérôme Glisse hmm_vma_walk->last = addr; 658da4c3c73SJérôme Glisse return 0; 659da4c3c73SJérôme Glisse } 660da4c3c73SJérôme Glisse 661f88a1e90SJérôme Glisse static void hmm_pfns_clear(struct hmm_range *range, 662f88a1e90SJérôme Glisse uint64_t *pfns, 66333cd47dcSJérôme Glisse unsigned long addr, 66433cd47dcSJérôme Glisse unsigned long end) 66533cd47dcSJérôme Glisse { 66633cd47dcSJérôme Glisse for (; addr < end; addr += PAGE_SIZE, pfns++) 667f88a1e90SJérôme Glisse *pfns = range->values[HMM_PFN_NONE]; 66833cd47dcSJérôme Glisse } 66933cd47dcSJérôme Glisse 670855ce7d2SJérôme Glisse static void hmm_pfns_special(struct hmm_range *range) 671855ce7d2SJérôme Glisse { 672855ce7d2SJérôme Glisse unsigned long addr = range->start, i = 0; 673855ce7d2SJérôme Glisse 674855ce7d2SJérôme Glisse for (; addr < range->end; addr += PAGE_SIZE, i++) 675f88a1e90SJérôme Glisse range->pfns[i] = range->values[HMM_PFN_SPECIAL]; 676855ce7d2SJérôme Glisse } 677855ce7d2SJérôme Glisse 678da4c3c73SJérôme Glisse /* 679da4c3c73SJérôme Glisse * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses 68008232a45SJérôme Glisse * @range: range being snapshotted 68186586a41SJérôme Glisse * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid 68286586a41SJérôme Glisse * vma permission, 0 success 683da4c3c73SJérôme Glisse * 684da4c3c73SJérôme Glisse * This snapshots the CPU page table for a range of virtual addresses. Snapshot 685da4c3c73SJérôme Glisse * validity is tracked by range struct. See hmm_vma_range_done() for further 686da4c3c73SJérôme Glisse * information. 687da4c3c73SJérôme Glisse * 688da4c3c73SJérôme Glisse * The range struct is initialized here. It tracks the CPU page table, but only 689da4c3c73SJérôme Glisse * if the function returns success (0), in which case the caller must then call 690da4c3c73SJérôme Glisse * hmm_vma_range_done() to stop CPU page table update tracking on this range. 691da4c3c73SJérôme Glisse * 692da4c3c73SJérôme Glisse * NOT CALLING hmm_vma_range_done() IF FUNCTION RETURNS 0 WILL LEAD TO SERIOUS 693da4c3c73SJérôme Glisse * MEMORY CORRUPTION ! YOU HAVE BEEN WARNED ! 694da4c3c73SJérôme Glisse */ 69508232a45SJérôme Glisse int hmm_vma_get_pfns(struct hmm_range *range) 696da4c3c73SJérôme Glisse { 69708232a45SJérôme Glisse struct vm_area_struct *vma = range->vma; 69874eee180SJérôme Glisse struct hmm_vma_walk hmm_vma_walk; 699da4c3c73SJérôme Glisse struct mm_walk mm_walk; 700da4c3c73SJérôme Glisse struct hmm *hmm; 701da4c3c73SJérôme Glisse 702da4c3c73SJérôme Glisse /* Sanity check, this really should not happen ! */ 70308232a45SJérôme Glisse if (range->start < vma->vm_start || range->start >= vma->vm_end) 704da4c3c73SJérôme Glisse return -EINVAL; 70508232a45SJérôme Glisse if (range->end < vma->vm_start || range->end > vma->vm_end) 706da4c3c73SJérôme Glisse return -EINVAL; 707da4c3c73SJérôme Glisse 708da4c3c73SJérôme Glisse hmm = hmm_register(vma->vm_mm); 709da4c3c73SJérôme Glisse if (!hmm) 710da4c3c73SJérôme Glisse return -ENOMEM; 711da4c3c73SJérôme Glisse /* Caller must have registered a mirror, via hmm_mirror_register() ! */ 712da4c3c73SJérôme Glisse if (!hmm->mmu_notifier.ops) 713da4c3c73SJérôme Glisse return -EINVAL; 714da4c3c73SJérôme Glisse 715855ce7d2SJérôme Glisse /* FIXME support hugetlb fs */ 716e1fb4a08SDave Jiang if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) || 717e1fb4a08SDave Jiang vma_is_dax(vma)) { 718855ce7d2SJérôme Glisse hmm_pfns_special(range); 719855ce7d2SJérôme Glisse return -EINVAL; 720855ce7d2SJérôme Glisse } 721855ce7d2SJérôme Glisse 72286586a41SJérôme Glisse if (!(vma->vm_flags & VM_READ)) { 72386586a41SJérôme Glisse /* 72486586a41SJérôme Glisse * If vma do not allow read access, then assume that it does 72586586a41SJérôme Glisse * not allow write access, either. Architecture that allow 72686586a41SJérôme Glisse * write without read access are not supported by HMM, because 72786586a41SJérôme Glisse * operations such has atomic access would not work. 72886586a41SJérôme Glisse */ 729f88a1e90SJérôme Glisse hmm_pfns_clear(range, range->pfns, range->start, range->end); 73086586a41SJérôme Glisse return -EPERM; 73186586a41SJérôme Glisse } 73286586a41SJérôme Glisse 733da4c3c73SJérôme Glisse /* Initialize range to track CPU page table update */ 734da4c3c73SJérôme Glisse spin_lock(&hmm->lock); 735da4c3c73SJérôme Glisse range->valid = true; 736da4c3c73SJérôme Glisse list_add_rcu(&range->list, &hmm->ranges); 737da4c3c73SJérôme Glisse spin_unlock(&hmm->lock); 738da4c3c73SJérôme Glisse 73974eee180SJérôme Glisse hmm_vma_walk.fault = false; 74074eee180SJérôme Glisse hmm_vma_walk.range = range; 74174eee180SJérôme Glisse mm_walk.private = &hmm_vma_walk; 74274eee180SJérôme Glisse 743da4c3c73SJérôme Glisse mm_walk.vma = vma; 744da4c3c73SJérôme Glisse mm_walk.mm = vma->vm_mm; 745da4c3c73SJérôme Glisse mm_walk.pte_entry = NULL; 746da4c3c73SJérôme Glisse mm_walk.test_walk = NULL; 747da4c3c73SJérôme Glisse mm_walk.hugetlb_entry = NULL; 748da4c3c73SJérôme Glisse mm_walk.pmd_entry = hmm_vma_walk_pmd; 749da4c3c73SJérôme Glisse mm_walk.pte_hole = hmm_vma_walk_hole; 750da4c3c73SJérôme Glisse 75108232a45SJérôme Glisse walk_page_range(range->start, range->end, &mm_walk); 752da4c3c73SJérôme Glisse return 0; 753da4c3c73SJérôme Glisse } 754da4c3c73SJérôme Glisse EXPORT_SYMBOL(hmm_vma_get_pfns); 755da4c3c73SJérôme Glisse 756da4c3c73SJérôme Glisse /* 757da4c3c73SJérôme Glisse * hmm_vma_range_done() - stop tracking change to CPU page table over a range 758da4c3c73SJérôme Glisse * @range: range being tracked 759da4c3c73SJérôme Glisse * Returns: false if range data has been invalidated, true otherwise 760da4c3c73SJérôme Glisse * 761da4c3c73SJérôme Glisse * Range struct is used to track updates to the CPU page table after a call to 762da4c3c73SJérôme Glisse * either hmm_vma_get_pfns() or hmm_vma_fault(). Once the device driver is done 763da4c3c73SJérôme Glisse * using the data, or wants to lock updates to the data it got from those 764da4c3c73SJérôme Glisse * functions, it must call the hmm_vma_range_done() function, which will then 765da4c3c73SJérôme Glisse * stop tracking CPU page table updates. 766da4c3c73SJérôme Glisse * 767da4c3c73SJérôme Glisse * Note that device driver must still implement general CPU page table update 768da4c3c73SJérôme Glisse * tracking either by using hmm_mirror (see hmm_mirror_register()) or by using 769da4c3c73SJérôme Glisse * the mmu_notifier API directly. 770da4c3c73SJérôme Glisse * 771da4c3c73SJérôme Glisse * CPU page table update tracking done through hmm_range is only temporary and 772da4c3c73SJérôme Glisse * to be used while trying to duplicate CPU page table contents for a range of 773da4c3c73SJérôme Glisse * virtual addresses. 774da4c3c73SJérôme Glisse * 775da4c3c73SJérôme Glisse * There are two ways to use this : 776da4c3c73SJérôme Glisse * again: 77708232a45SJérôme Glisse * hmm_vma_get_pfns(range); or hmm_vma_fault(...); 778da4c3c73SJérôme Glisse * trans = device_build_page_table_update_transaction(pfns); 779da4c3c73SJérôme Glisse * device_page_table_lock(); 78008232a45SJérôme Glisse * if (!hmm_vma_range_done(range)) { 781da4c3c73SJérôme Glisse * device_page_table_unlock(); 782da4c3c73SJérôme Glisse * goto again; 783da4c3c73SJérôme Glisse * } 784da4c3c73SJérôme Glisse * device_commit_transaction(trans); 785da4c3c73SJérôme Glisse * device_page_table_unlock(); 786da4c3c73SJérôme Glisse * 787da4c3c73SJérôme Glisse * Or: 78808232a45SJérôme Glisse * hmm_vma_get_pfns(range); or hmm_vma_fault(...); 789da4c3c73SJérôme Glisse * device_page_table_lock(); 79008232a45SJérôme Glisse * hmm_vma_range_done(range); 79108232a45SJérôme Glisse * device_update_page_table(range->pfns); 792da4c3c73SJérôme Glisse * device_page_table_unlock(); 793da4c3c73SJérôme Glisse */ 79408232a45SJérôme Glisse bool hmm_vma_range_done(struct hmm_range *range) 795da4c3c73SJérôme Glisse { 796da4c3c73SJérôme Glisse unsigned long npages = (range->end - range->start) >> PAGE_SHIFT; 797da4c3c73SJérôme Glisse struct hmm *hmm; 798da4c3c73SJérôme Glisse 799da4c3c73SJérôme Glisse if (range->end <= range->start) { 800da4c3c73SJérôme Glisse BUG(); 801da4c3c73SJérôme Glisse return false; 802da4c3c73SJérôme Glisse } 803da4c3c73SJérôme Glisse 80408232a45SJérôme Glisse hmm = hmm_register(range->vma->vm_mm); 805da4c3c73SJérôme Glisse if (!hmm) { 806da4c3c73SJérôme Glisse memset(range->pfns, 0, sizeof(*range->pfns) * npages); 807da4c3c73SJérôme Glisse return false; 808da4c3c73SJérôme Glisse } 809da4c3c73SJérôme Glisse 810da4c3c73SJérôme Glisse spin_lock(&hmm->lock); 811da4c3c73SJérôme Glisse list_del_rcu(&range->list); 812da4c3c73SJérôme Glisse spin_unlock(&hmm->lock); 813da4c3c73SJérôme Glisse 814da4c3c73SJérôme Glisse return range->valid; 815da4c3c73SJérôme Glisse } 816da4c3c73SJérôme Glisse EXPORT_SYMBOL(hmm_vma_range_done); 81774eee180SJérôme Glisse 81874eee180SJérôme Glisse /* 81974eee180SJérôme Glisse * hmm_vma_fault() - try to fault some address in a virtual address range 82008232a45SJérôme Glisse * @range: range being faulted 82174eee180SJérôme Glisse * @block: allow blocking on fault (if true it sleeps and do not drop mmap_sem) 82274eee180SJérôme Glisse * Returns: 0 success, error otherwise (-EAGAIN means mmap_sem have been drop) 82374eee180SJérôme Glisse * 82474eee180SJérôme Glisse * This is similar to a regular CPU page fault except that it will not trigger 82574eee180SJérôme Glisse * any memory migration if the memory being faulted is not accessible by CPUs. 82674eee180SJérôme Glisse * 827ff05c0c6SJérôme Glisse * On error, for one virtual address in the range, the function will mark the 828ff05c0c6SJérôme Glisse * corresponding HMM pfn entry with an error flag. 82974eee180SJérôme Glisse * 83074eee180SJérôme Glisse * Expected use pattern: 83174eee180SJérôme Glisse * retry: 83274eee180SJérôme Glisse * down_read(&mm->mmap_sem); 83374eee180SJérôme Glisse * // Find vma and address device wants to fault, initialize hmm_pfn_t 83474eee180SJérôme Glisse * // array accordingly 83508232a45SJérôme Glisse * ret = hmm_vma_fault(range, write, block); 83674eee180SJérôme Glisse * switch (ret) { 83774eee180SJérôme Glisse * case -EAGAIN: 83808232a45SJérôme Glisse * hmm_vma_range_done(range); 83974eee180SJérôme Glisse * // You might want to rate limit or yield to play nicely, you may 84074eee180SJérôme Glisse * // also commit any valid pfn in the array assuming that you are 84174eee180SJérôme Glisse * // getting true from hmm_vma_range_monitor_end() 84274eee180SJérôme Glisse * goto retry; 84374eee180SJérôme Glisse * case 0: 84474eee180SJérôme Glisse * break; 84586586a41SJérôme Glisse * case -ENOMEM: 84686586a41SJérôme Glisse * case -EINVAL: 84786586a41SJérôme Glisse * case -EPERM: 84874eee180SJérôme Glisse * default: 84974eee180SJérôme Glisse * // Handle error ! 85074eee180SJérôme Glisse * up_read(&mm->mmap_sem) 85174eee180SJérôme Glisse * return; 85274eee180SJérôme Glisse * } 85374eee180SJérôme Glisse * // Take device driver lock that serialize device page table update 85474eee180SJérôme Glisse * driver_lock_device_page_table_update(); 85508232a45SJérôme Glisse * hmm_vma_range_done(range); 85674eee180SJérôme Glisse * // Commit pfns we got from hmm_vma_fault() 85774eee180SJérôme Glisse * driver_unlock_device_page_table_update(); 85874eee180SJérôme Glisse * up_read(&mm->mmap_sem) 85974eee180SJérôme Glisse * 86074eee180SJérôme Glisse * YOU MUST CALL hmm_vma_range_done() AFTER THIS FUNCTION RETURN SUCCESS (0) 86174eee180SJérôme Glisse * BEFORE FREEING THE range struct OR YOU WILL HAVE SERIOUS MEMORY CORRUPTION ! 86274eee180SJérôme Glisse * 86374eee180SJérôme Glisse * YOU HAVE BEEN WARNED ! 86474eee180SJérôme Glisse */ 8652aee09d8SJérôme Glisse int hmm_vma_fault(struct hmm_range *range, bool block) 86674eee180SJérôme Glisse { 86708232a45SJérôme Glisse struct vm_area_struct *vma = range->vma; 86808232a45SJérôme Glisse unsigned long start = range->start; 86974eee180SJérôme Glisse struct hmm_vma_walk hmm_vma_walk; 87074eee180SJérôme Glisse struct mm_walk mm_walk; 87174eee180SJérôme Glisse struct hmm *hmm; 87274eee180SJérôme Glisse int ret; 87374eee180SJérôme Glisse 87474eee180SJérôme Glisse /* Sanity check, this really should not happen ! */ 87508232a45SJérôme Glisse if (range->start < vma->vm_start || range->start >= vma->vm_end) 87674eee180SJérôme Glisse return -EINVAL; 87708232a45SJérôme Glisse if (range->end < vma->vm_start || range->end > vma->vm_end) 87874eee180SJérôme Glisse return -EINVAL; 87974eee180SJérôme Glisse 88074eee180SJérôme Glisse hmm = hmm_register(vma->vm_mm); 88174eee180SJérôme Glisse if (!hmm) { 882f88a1e90SJérôme Glisse hmm_pfns_clear(range, range->pfns, range->start, range->end); 88374eee180SJérôme Glisse return -ENOMEM; 88474eee180SJérôme Glisse } 88574eee180SJérôme Glisse /* Caller must have registered a mirror using hmm_mirror_register() */ 88674eee180SJérôme Glisse if (!hmm->mmu_notifier.ops) 88774eee180SJérôme Glisse return -EINVAL; 88874eee180SJérôme Glisse 889855ce7d2SJérôme Glisse /* FIXME support hugetlb fs */ 890e1fb4a08SDave Jiang if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) || 891e1fb4a08SDave Jiang vma_is_dax(vma)) { 892855ce7d2SJérôme Glisse hmm_pfns_special(range); 893855ce7d2SJérôme Glisse return -EINVAL; 894855ce7d2SJérôme Glisse } 895855ce7d2SJérôme Glisse 89686586a41SJérôme Glisse if (!(vma->vm_flags & VM_READ)) { 89786586a41SJérôme Glisse /* 89886586a41SJérôme Glisse * If vma do not allow read access, then assume that it does 89986586a41SJérôme Glisse * not allow write access, either. Architecture that allow 90086586a41SJérôme Glisse * write without read access are not supported by HMM, because 90186586a41SJérôme Glisse * operations such has atomic access would not work. 90286586a41SJérôme Glisse */ 903f88a1e90SJérôme Glisse hmm_pfns_clear(range, range->pfns, range->start, range->end); 90486586a41SJérôme Glisse return -EPERM; 90586586a41SJérôme Glisse } 90674eee180SJérôme Glisse 90786586a41SJérôme Glisse /* Initialize range to track CPU page table update */ 90886586a41SJérôme Glisse spin_lock(&hmm->lock); 90986586a41SJérôme Glisse range->valid = true; 91086586a41SJérôme Glisse list_add_rcu(&range->list, &hmm->ranges); 91186586a41SJérôme Glisse spin_unlock(&hmm->lock); 91286586a41SJérôme Glisse 91374eee180SJérôme Glisse hmm_vma_walk.fault = true; 91474eee180SJérôme Glisse hmm_vma_walk.block = block; 91574eee180SJérôme Glisse hmm_vma_walk.range = range; 91674eee180SJérôme Glisse mm_walk.private = &hmm_vma_walk; 91774eee180SJérôme Glisse hmm_vma_walk.last = range->start; 91874eee180SJérôme Glisse 91974eee180SJérôme Glisse mm_walk.vma = vma; 92074eee180SJérôme Glisse mm_walk.mm = vma->vm_mm; 92174eee180SJérôme Glisse mm_walk.pte_entry = NULL; 92274eee180SJérôme Glisse mm_walk.test_walk = NULL; 92374eee180SJérôme Glisse mm_walk.hugetlb_entry = NULL; 92474eee180SJérôme Glisse mm_walk.pmd_entry = hmm_vma_walk_pmd; 92574eee180SJérôme Glisse mm_walk.pte_hole = hmm_vma_walk_hole; 92674eee180SJérôme Glisse 92774eee180SJérôme Glisse do { 92808232a45SJérôme Glisse ret = walk_page_range(start, range->end, &mm_walk); 92974eee180SJérôme Glisse start = hmm_vma_walk.last; 93074eee180SJérôme Glisse } while (ret == -EAGAIN); 93174eee180SJérôme Glisse 93274eee180SJérôme Glisse if (ret) { 93374eee180SJérôme Glisse unsigned long i; 93474eee180SJérôme Glisse 93574eee180SJérôme Glisse i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT; 936f88a1e90SJérôme Glisse hmm_pfns_clear(range, &range->pfns[i], hmm_vma_walk.last, 937f88a1e90SJérôme Glisse range->end); 93808232a45SJérôme Glisse hmm_vma_range_done(range); 93974eee180SJérôme Glisse } 94074eee180SJérôme Glisse return ret; 94174eee180SJérôme Glisse } 94274eee180SJérôme Glisse EXPORT_SYMBOL(hmm_vma_fault); 943c0b12405SJérôme Glisse #endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */ 9444ef589dcSJérôme Glisse 9454ef589dcSJérôme Glisse 946df6ad698SJérôme Glisse #if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC) 9474ef589dcSJérôme Glisse struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma, 9484ef589dcSJérôme Glisse unsigned long addr) 9494ef589dcSJérôme Glisse { 9504ef589dcSJérôme Glisse struct page *page; 9514ef589dcSJérôme Glisse 9524ef589dcSJérôme Glisse page = alloc_page_vma(GFP_HIGHUSER, vma, addr); 9534ef589dcSJérôme Glisse if (!page) 9544ef589dcSJérôme Glisse return NULL; 9554ef589dcSJérôme Glisse lock_page(page); 9564ef589dcSJérôme Glisse return page; 9574ef589dcSJérôme Glisse } 9584ef589dcSJérôme Glisse EXPORT_SYMBOL(hmm_vma_alloc_locked_page); 9594ef589dcSJérôme Glisse 9604ef589dcSJérôme Glisse 9614ef589dcSJérôme Glisse static void hmm_devmem_ref_release(struct percpu_ref *ref) 9624ef589dcSJérôme Glisse { 9634ef589dcSJérôme Glisse struct hmm_devmem *devmem; 9644ef589dcSJérôme Glisse 9654ef589dcSJérôme Glisse devmem = container_of(ref, struct hmm_devmem, ref); 9664ef589dcSJérôme Glisse complete(&devmem->completion); 9674ef589dcSJérôme Glisse } 9684ef589dcSJérôme Glisse 9694ef589dcSJérôme Glisse static void hmm_devmem_ref_exit(void *data) 9704ef589dcSJérôme Glisse { 9714ef589dcSJérôme Glisse struct percpu_ref *ref = data; 9724ef589dcSJérôme Glisse struct hmm_devmem *devmem; 9734ef589dcSJérôme Glisse 9744ef589dcSJérôme Glisse devmem = container_of(ref, struct hmm_devmem, ref); 9754ef589dcSJérôme Glisse percpu_ref_exit(ref); 9764ef589dcSJérôme Glisse devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data); 9774ef589dcSJérôme Glisse } 9784ef589dcSJérôme Glisse 9794ef589dcSJérôme Glisse static void hmm_devmem_ref_kill(void *data) 9804ef589dcSJérôme Glisse { 9814ef589dcSJérôme Glisse struct percpu_ref *ref = data; 9824ef589dcSJérôme Glisse struct hmm_devmem *devmem; 9834ef589dcSJérôme Glisse 9844ef589dcSJérôme Glisse devmem = container_of(ref, struct hmm_devmem, ref); 9854ef589dcSJérôme Glisse percpu_ref_kill(ref); 9864ef589dcSJérôme Glisse wait_for_completion(&devmem->completion); 9874ef589dcSJérôme Glisse devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data); 9884ef589dcSJérôme Glisse } 9894ef589dcSJérôme Glisse 9904ef589dcSJérôme Glisse static int hmm_devmem_fault(struct vm_area_struct *vma, 9914ef589dcSJérôme Glisse unsigned long addr, 9924ef589dcSJérôme Glisse const struct page *page, 9934ef589dcSJérôme Glisse unsigned int flags, 9944ef589dcSJérôme Glisse pmd_t *pmdp) 9954ef589dcSJérôme Glisse { 9964ef589dcSJérôme Glisse struct hmm_devmem *devmem = page->pgmap->data; 9974ef589dcSJérôme Glisse 9984ef589dcSJérôme Glisse return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp); 9994ef589dcSJérôme Glisse } 10004ef589dcSJérôme Glisse 10014ef589dcSJérôme Glisse static void hmm_devmem_free(struct page *page, void *data) 10024ef589dcSJérôme Glisse { 10034ef589dcSJérôme Glisse struct hmm_devmem *devmem = data; 10044ef589dcSJérôme Glisse 10052fa147bdSDan Williams page->mapping = NULL; 10062fa147bdSDan Williams 10074ef589dcSJérôme Glisse devmem->ops->free(devmem, page); 10084ef589dcSJérôme Glisse } 10094ef589dcSJérôme Glisse 10104ef589dcSJérôme Glisse static DEFINE_MUTEX(hmm_devmem_lock); 10114ef589dcSJérôme Glisse static RADIX_TREE(hmm_devmem_radix, GFP_KERNEL); 10124ef589dcSJérôme Glisse 10134ef589dcSJérôme Glisse static void hmm_devmem_radix_release(struct resource *resource) 10144ef589dcSJérôme Glisse { 10151e926419SColin Ian King resource_size_t key; 10164ef589dcSJérôme Glisse 10174ef589dcSJérôme Glisse mutex_lock(&hmm_devmem_lock); 10184ef589dcSJérôme Glisse for (key = resource->start; 10194ef589dcSJérôme Glisse key <= resource->end; 10204ef589dcSJérôme Glisse key += PA_SECTION_SIZE) 10214ef589dcSJérôme Glisse radix_tree_delete(&hmm_devmem_radix, key >> PA_SECTION_SHIFT); 10224ef589dcSJérôme Glisse mutex_unlock(&hmm_devmem_lock); 10234ef589dcSJérôme Glisse } 10244ef589dcSJérôme Glisse 10254ef589dcSJérôme Glisse static void hmm_devmem_release(struct device *dev, void *data) 10264ef589dcSJérôme Glisse { 10274ef589dcSJérôme Glisse struct hmm_devmem *devmem = data; 10284ef589dcSJérôme Glisse struct resource *resource = devmem->resource; 10294ef589dcSJérôme Glisse unsigned long start_pfn, npages; 10304ef589dcSJérôme Glisse struct zone *zone; 10314ef589dcSJérôme Glisse struct page *page; 10324ef589dcSJérôme Glisse 10334ef589dcSJérôme Glisse if (percpu_ref_tryget_live(&devmem->ref)) { 10344ef589dcSJérôme Glisse dev_WARN(dev, "%s: page mapping is still live!\n", __func__); 10354ef589dcSJérôme Glisse percpu_ref_put(&devmem->ref); 10364ef589dcSJérôme Glisse } 10374ef589dcSJérôme Glisse 10384ef589dcSJérôme Glisse /* pages are dead and unused, undo the arch mapping */ 10394ef589dcSJérôme Glisse start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT; 10404ef589dcSJérôme Glisse npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT; 10414ef589dcSJérôme Glisse 10424ef589dcSJérôme Glisse page = pfn_to_page(start_pfn); 10434ef589dcSJérôme Glisse zone = page_zone(page); 10444ef589dcSJérôme Glisse 10454ef589dcSJérôme Glisse mem_hotplug_begin(); 1046d3df0a42SJérôme Glisse if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) 1047da024512SChristoph Hellwig __remove_pages(zone, start_pfn, npages, NULL); 1048d3df0a42SJérôme Glisse else 1049d3df0a42SJérôme Glisse arch_remove_memory(start_pfn << PAGE_SHIFT, 1050da024512SChristoph Hellwig npages << PAGE_SHIFT, NULL); 10514ef589dcSJérôme Glisse mem_hotplug_done(); 10524ef589dcSJérôme Glisse 10534ef589dcSJérôme Glisse hmm_devmem_radix_release(resource); 10544ef589dcSJérôme Glisse } 10554ef589dcSJérôme Glisse 10564ef589dcSJérôme Glisse static int hmm_devmem_pages_create(struct hmm_devmem *devmem) 10574ef589dcSJérôme Glisse { 10584ef589dcSJérôme Glisse resource_size_t key, align_start, align_size, align_end; 10594ef589dcSJérôme Glisse struct device *device = devmem->device; 10604ef589dcSJérôme Glisse int ret, nid, is_ram; 10614ef589dcSJérôme Glisse 10624ef589dcSJérôme Glisse align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1); 10634ef589dcSJérôme Glisse align_size = ALIGN(devmem->resource->start + 10644ef589dcSJérôme Glisse resource_size(devmem->resource), 10654ef589dcSJérôme Glisse PA_SECTION_SIZE) - align_start; 10664ef589dcSJérôme Glisse 10674ef589dcSJérôme Glisse is_ram = region_intersects(align_start, align_size, 10684ef589dcSJérôme Glisse IORESOURCE_SYSTEM_RAM, 10694ef589dcSJérôme Glisse IORES_DESC_NONE); 10704ef589dcSJérôme Glisse if (is_ram == REGION_MIXED) { 10714ef589dcSJérôme Glisse WARN_ONCE(1, "%s attempted on mixed region %pr\n", 10724ef589dcSJérôme Glisse __func__, devmem->resource); 10734ef589dcSJérôme Glisse return -ENXIO; 10744ef589dcSJérôme Glisse } 10754ef589dcSJérôme Glisse if (is_ram == REGION_INTERSECTS) 10764ef589dcSJérôme Glisse return -ENXIO; 10774ef589dcSJérôme Glisse 1078d3df0a42SJérôme Glisse if (devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY) 1079d3df0a42SJérôme Glisse devmem->pagemap.type = MEMORY_DEVICE_PUBLIC; 1080d3df0a42SJérôme Glisse else 10814ef589dcSJérôme Glisse devmem->pagemap.type = MEMORY_DEVICE_PRIVATE; 1082d3df0a42SJérôme Glisse 1083e7744aa2SLogan Gunthorpe devmem->pagemap.res = *devmem->resource; 10844ef589dcSJérôme Glisse devmem->pagemap.page_fault = hmm_devmem_fault; 10854ef589dcSJérôme Glisse devmem->pagemap.page_free = hmm_devmem_free; 10864ef589dcSJérôme Glisse devmem->pagemap.dev = devmem->device; 10874ef589dcSJérôme Glisse devmem->pagemap.ref = &devmem->ref; 10884ef589dcSJérôme Glisse devmem->pagemap.data = devmem; 10894ef589dcSJérôme Glisse 10904ef589dcSJérôme Glisse mutex_lock(&hmm_devmem_lock); 10914ef589dcSJérôme Glisse align_end = align_start + align_size - 1; 10924ef589dcSJérôme Glisse for (key = align_start; key <= align_end; key += PA_SECTION_SIZE) { 10934ef589dcSJérôme Glisse struct hmm_devmem *dup; 10944ef589dcSJérôme Glisse 109518be460eSTejun Heo dup = radix_tree_lookup(&hmm_devmem_radix, 109618be460eSTejun Heo key >> PA_SECTION_SHIFT); 10974ef589dcSJérôme Glisse if (dup) { 10984ef589dcSJérôme Glisse dev_err(device, "%s: collides with mapping for %s\n", 10994ef589dcSJérôme Glisse __func__, dev_name(dup->device)); 11004ef589dcSJérôme Glisse mutex_unlock(&hmm_devmem_lock); 11014ef589dcSJérôme Glisse ret = -EBUSY; 11024ef589dcSJérôme Glisse goto error; 11034ef589dcSJérôme Glisse } 11044ef589dcSJérôme Glisse ret = radix_tree_insert(&hmm_devmem_radix, 11054ef589dcSJérôme Glisse key >> PA_SECTION_SHIFT, 11064ef589dcSJérôme Glisse devmem); 11074ef589dcSJérôme Glisse if (ret) { 11084ef589dcSJérôme Glisse dev_err(device, "%s: failed: %d\n", __func__, ret); 11094ef589dcSJérôme Glisse mutex_unlock(&hmm_devmem_lock); 11104ef589dcSJérôme Glisse goto error_radix; 11114ef589dcSJérôme Glisse } 11124ef589dcSJérôme Glisse } 11134ef589dcSJérôme Glisse mutex_unlock(&hmm_devmem_lock); 11144ef589dcSJérôme Glisse 11154ef589dcSJérôme Glisse nid = dev_to_node(device); 11164ef589dcSJérôme Glisse if (nid < 0) 11174ef589dcSJérôme Glisse nid = numa_mem_id(); 11184ef589dcSJérôme Glisse 11194ef589dcSJérôme Glisse mem_hotplug_begin(); 11204ef589dcSJérôme Glisse /* 11214ef589dcSJérôme Glisse * For device private memory we call add_pages() as we only need to 11224ef589dcSJérôme Glisse * allocate and initialize struct page for the device memory. More- 11234ef589dcSJérôme Glisse * over the device memory is un-accessible thus we do not want to 11244ef589dcSJérôme Glisse * create a linear mapping for the memory like arch_add_memory() 11254ef589dcSJérôme Glisse * would do. 1126d3df0a42SJérôme Glisse * 1127d3df0a42SJérôme Glisse * For device public memory, which is accesible by the CPU, we do 1128d3df0a42SJérôme Glisse * want the linear mapping and thus use arch_add_memory(). 11294ef589dcSJérôme Glisse */ 1130d3df0a42SJérôme Glisse if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC) 113124e6d5a5SChristoph Hellwig ret = arch_add_memory(nid, align_start, align_size, NULL, 113224e6d5a5SChristoph Hellwig false); 1133d3df0a42SJérôme Glisse else 11344ef589dcSJérôme Glisse ret = add_pages(nid, align_start >> PAGE_SHIFT, 113524e6d5a5SChristoph Hellwig align_size >> PAGE_SHIFT, NULL, false); 11364ef589dcSJérôme Glisse if (ret) { 11374ef589dcSJérôme Glisse mem_hotplug_done(); 11384ef589dcSJérôme Glisse goto error_add_memory; 11394ef589dcSJérôme Glisse } 11404ef589dcSJérôme Glisse move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], 11414ef589dcSJérôme Glisse align_start >> PAGE_SHIFT, 1142a99583e7SChristoph Hellwig align_size >> PAGE_SHIFT, NULL); 11434ef589dcSJérôme Glisse mem_hotplug_done(); 11444ef589dcSJérôme Glisse 1145966cf44fSAlexander Duyck /* 1146966cf44fSAlexander Duyck * Initialization of the pages has been deferred until now in order 1147966cf44fSAlexander Duyck * to allow us to do the work while not holding the hotplug lock. 1148966cf44fSAlexander Duyck */ 1149966cf44fSAlexander Duyck memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], 1150966cf44fSAlexander Duyck align_start >> PAGE_SHIFT, 1151966cf44fSAlexander Duyck align_size >> PAGE_SHIFT, &devmem->pagemap); 11524ef589dcSJérôme Glisse 11534ef589dcSJérôme Glisse return 0; 11544ef589dcSJérôme Glisse 11554ef589dcSJérôme Glisse error_add_memory: 11564ef589dcSJérôme Glisse untrack_pfn(NULL, PHYS_PFN(align_start), align_size); 11574ef589dcSJérôme Glisse error_radix: 11584ef589dcSJérôme Glisse hmm_devmem_radix_release(devmem->resource); 11594ef589dcSJérôme Glisse error: 11604ef589dcSJérôme Glisse return ret; 11614ef589dcSJérôme Glisse } 11624ef589dcSJérôme Glisse 11634ef589dcSJérôme Glisse static int hmm_devmem_match(struct device *dev, void *data, void *match_data) 11644ef589dcSJérôme Glisse { 11654ef589dcSJérôme Glisse struct hmm_devmem *devmem = data; 11664ef589dcSJérôme Glisse 11674ef589dcSJérôme Glisse return devmem->resource == match_data; 11684ef589dcSJérôme Glisse } 11694ef589dcSJérôme Glisse 11704ef589dcSJérôme Glisse static void hmm_devmem_pages_remove(struct hmm_devmem *devmem) 11714ef589dcSJérôme Glisse { 11724ef589dcSJérôme Glisse devres_release(devmem->device, &hmm_devmem_release, 11734ef589dcSJérôme Glisse &hmm_devmem_match, devmem->resource); 11744ef589dcSJérôme Glisse } 11754ef589dcSJérôme Glisse 11764ef589dcSJérôme Glisse /* 11774ef589dcSJérôme Glisse * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory 11784ef589dcSJérôme Glisse * 11794ef589dcSJérôme Glisse * @ops: memory event device driver callback (see struct hmm_devmem_ops) 11804ef589dcSJérôme Glisse * @device: device struct to bind the resource too 11814ef589dcSJérôme Glisse * @size: size in bytes of the device memory to add 11824ef589dcSJérôme Glisse * Returns: pointer to new hmm_devmem struct ERR_PTR otherwise 11834ef589dcSJérôme Glisse * 11844ef589dcSJérôme Glisse * This function first finds an empty range of physical address big enough to 11854ef589dcSJérôme Glisse * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which 11864ef589dcSJérôme Glisse * in turn allocates struct pages. It does not do anything beyond that; all 11874ef589dcSJérôme Glisse * events affecting the memory will go through the various callbacks provided 11884ef589dcSJérôme Glisse * by hmm_devmem_ops struct. 11894ef589dcSJérôme Glisse * 11904ef589dcSJérôme Glisse * Device driver should call this function during device initialization and 11914ef589dcSJérôme Glisse * is then responsible of memory management. HMM only provides helpers. 11924ef589dcSJérôme Glisse */ 11934ef589dcSJérôme Glisse struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, 11944ef589dcSJérôme Glisse struct device *device, 11954ef589dcSJérôme Glisse unsigned long size) 11964ef589dcSJérôme Glisse { 11974ef589dcSJérôme Glisse struct hmm_devmem *devmem; 11984ef589dcSJérôme Glisse resource_size_t addr; 11994ef589dcSJérôme Glisse int ret; 12004ef589dcSJérôme Glisse 1201e7638488SDan Williams dev_pagemap_get_ops(); 12024ef589dcSJérôme Glisse 12034ef589dcSJérôme Glisse devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem), 12044ef589dcSJérôme Glisse GFP_KERNEL, dev_to_node(device)); 12054ef589dcSJérôme Glisse if (!devmem) 12064ef589dcSJérôme Glisse return ERR_PTR(-ENOMEM); 12074ef589dcSJérôme Glisse 12084ef589dcSJérôme Glisse init_completion(&devmem->completion); 12094ef589dcSJérôme Glisse devmem->pfn_first = -1UL; 12104ef589dcSJérôme Glisse devmem->pfn_last = -1UL; 12114ef589dcSJérôme Glisse devmem->resource = NULL; 12124ef589dcSJérôme Glisse devmem->device = device; 12134ef589dcSJérôme Glisse devmem->ops = ops; 12144ef589dcSJérôme Glisse 12154ef589dcSJérôme Glisse ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release, 12164ef589dcSJérôme Glisse 0, GFP_KERNEL); 12174ef589dcSJérôme Glisse if (ret) 12184ef589dcSJérôme Glisse goto error_percpu_ref; 12194ef589dcSJérôme Glisse 12204ef589dcSJérôme Glisse ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref); 12214ef589dcSJérôme Glisse if (ret) 12224ef589dcSJérôme Glisse goto error_devm_add_action; 12234ef589dcSJérôme Glisse 12244ef589dcSJérôme Glisse size = ALIGN(size, PA_SECTION_SIZE); 12254ef589dcSJérôme Glisse addr = min((unsigned long)iomem_resource.end, 12264ef589dcSJérôme Glisse (1UL << MAX_PHYSMEM_BITS) - 1); 12274ef589dcSJérôme Glisse addr = addr - size + 1UL; 12284ef589dcSJérôme Glisse 12294ef589dcSJérôme Glisse /* 12304ef589dcSJérôme Glisse * FIXME add a new helper to quickly walk resource tree and find free 12314ef589dcSJérôme Glisse * range 12324ef589dcSJérôme Glisse * 12334ef589dcSJérôme Glisse * FIXME what about ioport_resource resource ? 12344ef589dcSJérôme Glisse */ 12354ef589dcSJérôme Glisse for (; addr > size && addr >= iomem_resource.start; addr -= size) { 12364ef589dcSJérôme Glisse ret = region_intersects(addr, size, 0, IORES_DESC_NONE); 12374ef589dcSJérôme Glisse if (ret != REGION_DISJOINT) 12384ef589dcSJérôme Glisse continue; 12394ef589dcSJérôme Glisse 12404ef589dcSJérôme Glisse devmem->resource = devm_request_mem_region(device, addr, size, 12414ef589dcSJérôme Glisse dev_name(device)); 12424ef589dcSJérôme Glisse if (!devmem->resource) { 12434ef589dcSJérôme Glisse ret = -ENOMEM; 12444ef589dcSJérôme Glisse goto error_no_resource; 12454ef589dcSJérôme Glisse } 12464ef589dcSJérôme Glisse break; 12474ef589dcSJérôme Glisse } 12484ef589dcSJérôme Glisse if (!devmem->resource) { 12494ef589dcSJérôme Glisse ret = -ERANGE; 12504ef589dcSJérôme Glisse goto error_no_resource; 12514ef589dcSJérôme Glisse } 12524ef589dcSJérôme Glisse 12534ef589dcSJérôme Glisse devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY; 12544ef589dcSJérôme Glisse devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT; 12554ef589dcSJérôme Glisse devmem->pfn_last = devmem->pfn_first + 12564ef589dcSJérôme Glisse (resource_size(devmem->resource) >> PAGE_SHIFT); 12574ef589dcSJérôme Glisse 12584ef589dcSJérôme Glisse ret = hmm_devmem_pages_create(devmem); 12594ef589dcSJérôme Glisse if (ret) 12604ef589dcSJérôme Glisse goto error_pages; 12614ef589dcSJérôme Glisse 12624ef589dcSJérôme Glisse devres_add(device, devmem); 12634ef589dcSJérôme Glisse 12644ef589dcSJérôme Glisse ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref); 12654ef589dcSJérôme Glisse if (ret) { 12664ef589dcSJérôme Glisse hmm_devmem_remove(devmem); 12674ef589dcSJérôme Glisse return ERR_PTR(ret); 12684ef589dcSJérôme Glisse } 12694ef589dcSJérôme Glisse 12704ef589dcSJérôme Glisse return devmem; 12714ef589dcSJérôme Glisse 12724ef589dcSJérôme Glisse error_pages: 12734ef589dcSJérôme Glisse devm_release_mem_region(device, devmem->resource->start, 12744ef589dcSJérôme Glisse resource_size(devmem->resource)); 12754ef589dcSJérôme Glisse error_no_resource: 12764ef589dcSJérôme Glisse error_devm_add_action: 12774ef589dcSJérôme Glisse hmm_devmem_ref_kill(&devmem->ref); 12784ef589dcSJérôme Glisse hmm_devmem_ref_exit(&devmem->ref); 12794ef589dcSJérôme Glisse error_percpu_ref: 12804ef589dcSJérôme Glisse devres_free(devmem); 12814ef589dcSJérôme Glisse return ERR_PTR(ret); 12824ef589dcSJérôme Glisse } 12834ef589dcSJérôme Glisse EXPORT_SYMBOL(hmm_devmem_add); 12844ef589dcSJérôme Glisse 1285d3df0a42SJérôme Glisse struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops, 1286d3df0a42SJérôme Glisse struct device *device, 1287d3df0a42SJérôme Glisse struct resource *res) 1288d3df0a42SJérôme Glisse { 1289d3df0a42SJérôme Glisse struct hmm_devmem *devmem; 1290d3df0a42SJérôme Glisse int ret; 1291d3df0a42SJérôme Glisse 1292d3df0a42SJérôme Glisse if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY) 1293d3df0a42SJérôme Glisse return ERR_PTR(-EINVAL); 1294d3df0a42SJérôme Glisse 1295e7638488SDan Williams dev_pagemap_get_ops(); 1296d3df0a42SJérôme Glisse 1297d3df0a42SJérôme Glisse devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem), 1298d3df0a42SJérôme Glisse GFP_KERNEL, dev_to_node(device)); 1299d3df0a42SJérôme Glisse if (!devmem) 1300d3df0a42SJérôme Glisse return ERR_PTR(-ENOMEM); 1301d3df0a42SJérôme Glisse 1302d3df0a42SJérôme Glisse init_completion(&devmem->completion); 1303d3df0a42SJérôme Glisse devmem->pfn_first = -1UL; 1304d3df0a42SJérôme Glisse devmem->pfn_last = -1UL; 1305d3df0a42SJérôme Glisse devmem->resource = res; 1306d3df0a42SJérôme Glisse devmem->device = device; 1307d3df0a42SJérôme Glisse devmem->ops = ops; 1308d3df0a42SJérôme Glisse 1309d3df0a42SJérôme Glisse ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release, 1310d3df0a42SJérôme Glisse 0, GFP_KERNEL); 1311d3df0a42SJérôme Glisse if (ret) 1312d3df0a42SJérôme Glisse goto error_percpu_ref; 1313d3df0a42SJérôme Glisse 1314d3df0a42SJérôme Glisse ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref); 1315d3df0a42SJérôme Glisse if (ret) 1316d3df0a42SJérôme Glisse goto error_devm_add_action; 1317d3df0a42SJérôme Glisse 1318d3df0a42SJérôme Glisse 1319d3df0a42SJérôme Glisse devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT; 1320d3df0a42SJérôme Glisse devmem->pfn_last = devmem->pfn_first + 1321d3df0a42SJérôme Glisse (resource_size(devmem->resource) >> PAGE_SHIFT); 1322d3df0a42SJérôme Glisse 1323d3df0a42SJérôme Glisse ret = hmm_devmem_pages_create(devmem); 1324d3df0a42SJérôme Glisse if (ret) 1325d3df0a42SJérôme Glisse goto error_devm_add_action; 1326d3df0a42SJérôme Glisse 1327d3df0a42SJérôme Glisse devres_add(device, devmem); 1328d3df0a42SJérôme Glisse 1329d3df0a42SJérôme Glisse ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref); 1330d3df0a42SJérôme Glisse if (ret) { 1331d3df0a42SJérôme Glisse hmm_devmem_remove(devmem); 1332d3df0a42SJérôme Glisse return ERR_PTR(ret); 1333d3df0a42SJérôme Glisse } 1334d3df0a42SJérôme Glisse 1335d3df0a42SJérôme Glisse return devmem; 1336d3df0a42SJérôme Glisse 1337d3df0a42SJérôme Glisse error_devm_add_action: 1338d3df0a42SJérôme Glisse hmm_devmem_ref_kill(&devmem->ref); 1339d3df0a42SJérôme Glisse hmm_devmem_ref_exit(&devmem->ref); 1340d3df0a42SJérôme Glisse error_percpu_ref: 1341d3df0a42SJérôme Glisse devres_free(devmem); 1342d3df0a42SJérôme Glisse return ERR_PTR(ret); 1343d3df0a42SJérôme Glisse } 1344d3df0a42SJérôme Glisse EXPORT_SYMBOL(hmm_devmem_add_resource); 1345d3df0a42SJérôme Glisse 13464ef589dcSJérôme Glisse /* 13474ef589dcSJérôme Glisse * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE) 13484ef589dcSJérôme Glisse * 13494ef589dcSJérôme Glisse * @devmem: hmm_devmem struct use to track and manage the ZONE_DEVICE memory 13504ef589dcSJérôme Glisse * 13514ef589dcSJérôme Glisse * This will hot-unplug memory that was hotplugged by hmm_devmem_add on behalf 13524ef589dcSJérôme Glisse * of the device driver. It will free struct page and remove the resource that 13534ef589dcSJérôme Glisse * reserved the physical address range for this device memory. 13544ef589dcSJérôme Glisse */ 13554ef589dcSJérôme Glisse void hmm_devmem_remove(struct hmm_devmem *devmem) 13564ef589dcSJérôme Glisse { 13574ef589dcSJérôme Glisse resource_size_t start, size; 13584ef589dcSJérôme Glisse struct device *device; 1359d3df0a42SJérôme Glisse bool cdm = false; 13604ef589dcSJérôme Glisse 13614ef589dcSJérôme Glisse if (!devmem) 13624ef589dcSJérôme Glisse return; 13634ef589dcSJérôme Glisse 13644ef589dcSJérôme Glisse device = devmem->device; 13654ef589dcSJérôme Glisse start = devmem->resource->start; 13664ef589dcSJérôme Glisse size = resource_size(devmem->resource); 13674ef589dcSJérôme Glisse 1368d3df0a42SJérôme Glisse cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY; 13694ef589dcSJérôme Glisse hmm_devmem_ref_kill(&devmem->ref); 13704ef589dcSJérôme Glisse hmm_devmem_ref_exit(&devmem->ref); 13714ef589dcSJérôme Glisse hmm_devmem_pages_remove(devmem); 13724ef589dcSJérôme Glisse 1373d3df0a42SJérôme Glisse if (!cdm) 13744ef589dcSJérôme Glisse devm_release_mem_region(device, start, size); 13754ef589dcSJérôme Glisse } 13764ef589dcSJérôme Glisse EXPORT_SYMBOL(hmm_devmem_remove); 1377858b54daSJérôme Glisse 1378858b54daSJérôme Glisse /* 1379858b54daSJérôme Glisse * A device driver that wants to handle multiple devices memory through a 1380858b54daSJérôme Glisse * single fake device can use hmm_device to do so. This is purely a helper 1381858b54daSJérôme Glisse * and it is not needed to make use of any HMM functionality. 1382858b54daSJérôme Glisse */ 1383858b54daSJérôme Glisse #define HMM_DEVICE_MAX 256 1384858b54daSJérôme Glisse 1385858b54daSJérôme Glisse static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX); 1386858b54daSJérôme Glisse static DEFINE_SPINLOCK(hmm_device_lock); 1387858b54daSJérôme Glisse static struct class *hmm_device_class; 1388858b54daSJérôme Glisse static dev_t hmm_device_devt; 1389858b54daSJérôme Glisse 1390858b54daSJérôme Glisse static void hmm_device_release(struct device *device) 1391858b54daSJérôme Glisse { 1392858b54daSJérôme Glisse struct hmm_device *hmm_device; 1393858b54daSJérôme Glisse 1394858b54daSJérôme Glisse hmm_device = container_of(device, struct hmm_device, device); 1395858b54daSJérôme Glisse spin_lock(&hmm_device_lock); 1396858b54daSJérôme Glisse clear_bit(hmm_device->minor, hmm_device_mask); 1397858b54daSJérôme Glisse spin_unlock(&hmm_device_lock); 1398858b54daSJérôme Glisse 1399858b54daSJérôme Glisse kfree(hmm_device); 1400858b54daSJérôme Glisse } 1401858b54daSJérôme Glisse 1402858b54daSJérôme Glisse struct hmm_device *hmm_device_new(void *drvdata) 1403858b54daSJérôme Glisse { 1404858b54daSJérôme Glisse struct hmm_device *hmm_device; 1405858b54daSJérôme Glisse 1406858b54daSJérôme Glisse hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL); 1407858b54daSJérôme Glisse if (!hmm_device) 1408858b54daSJérôme Glisse return ERR_PTR(-ENOMEM); 1409858b54daSJérôme Glisse 1410858b54daSJérôme Glisse spin_lock(&hmm_device_lock); 1411858b54daSJérôme Glisse hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX); 1412858b54daSJérôme Glisse if (hmm_device->minor >= HMM_DEVICE_MAX) { 1413858b54daSJérôme Glisse spin_unlock(&hmm_device_lock); 1414858b54daSJérôme Glisse kfree(hmm_device); 1415858b54daSJérôme Glisse return ERR_PTR(-EBUSY); 1416858b54daSJérôme Glisse } 1417858b54daSJérôme Glisse set_bit(hmm_device->minor, hmm_device_mask); 1418858b54daSJérôme Glisse spin_unlock(&hmm_device_lock); 1419858b54daSJérôme Glisse 1420858b54daSJérôme Glisse dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor); 1421858b54daSJérôme Glisse hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt), 1422858b54daSJérôme Glisse hmm_device->minor); 1423858b54daSJérôme Glisse hmm_device->device.release = hmm_device_release; 1424858b54daSJérôme Glisse dev_set_drvdata(&hmm_device->device, drvdata); 1425858b54daSJérôme Glisse hmm_device->device.class = hmm_device_class; 1426858b54daSJérôme Glisse device_initialize(&hmm_device->device); 1427858b54daSJérôme Glisse 1428858b54daSJérôme Glisse return hmm_device; 1429858b54daSJérôme Glisse } 1430858b54daSJérôme Glisse EXPORT_SYMBOL(hmm_device_new); 1431858b54daSJérôme Glisse 1432858b54daSJérôme Glisse void hmm_device_put(struct hmm_device *hmm_device) 1433858b54daSJérôme Glisse { 1434858b54daSJérôme Glisse put_device(&hmm_device->device); 1435858b54daSJérôme Glisse } 1436858b54daSJérôme Glisse EXPORT_SYMBOL(hmm_device_put); 1437858b54daSJérôme Glisse 1438858b54daSJérôme Glisse static int __init hmm_init(void) 1439858b54daSJérôme Glisse { 1440858b54daSJérôme Glisse int ret; 1441858b54daSJérôme Glisse 1442858b54daSJérôme Glisse ret = alloc_chrdev_region(&hmm_device_devt, 0, 1443858b54daSJérôme Glisse HMM_DEVICE_MAX, 1444858b54daSJérôme Glisse "hmm_device"); 1445858b54daSJérôme Glisse if (ret) 1446858b54daSJérôme Glisse return ret; 1447858b54daSJérôme Glisse 1448858b54daSJérôme Glisse hmm_device_class = class_create(THIS_MODULE, "hmm_device"); 1449858b54daSJérôme Glisse if (IS_ERR(hmm_device_class)) { 1450858b54daSJérôme Glisse unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX); 1451858b54daSJérôme Glisse return PTR_ERR(hmm_device_class); 1452858b54daSJérôme Glisse } 1453858b54daSJérôme Glisse return 0; 1454858b54daSJérôme Glisse } 1455858b54daSJérôme Glisse 1456858b54daSJérôme Glisse device_initcall(hmm_init); 1457df6ad698SJérôme Glisse #endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */ 1458