/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/*
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @lock: lock protecting ranges list
 * @sequence: we track updates to the CPU page table with a sequence number
 * @ranges: list of range being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 */
struct hmm {
	struct mm_struct	*mm;
	spinlock_t		lock;
	atomic_t		sequence;
	struct list_head	ranges;
	struct list_head	mirrors;
	struct mmu_notifier	mmu_notifier;
	struct rw_semaphore	mirrors_sem;
};
/*
 * hmm_register - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 *
 * This is not intended to be used directly by device drivers. It allocates an
 * HMM struct if mm does not have one, and initializes it.
 */
static struct hmm *hmm_register(struct mm_struct *mm)
{
	struct hmm *hmm = READ_ONCE(mm->hmm);
	bool cleanup = false;

	/*
	 * The hmm struct can only be freed once the mm_struct goes away,
	 * hence we should always have pre-allocated a new hmm struct
	 * above.
	 */
	if (hmm)
		return hmm;

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	atomic_set(&hmm->sequence, 0);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->lock);
	hmm->mm = mm;

	/*
	 * We should only get here if we hold the mmap_sem in write mode,
	 * i.e. on registration of the first mirror through
	 * hmm_mirror_register().
	 */
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
		kfree(hmm);
		return NULL;
	}

	spin_lock(&mm->page_table_lock);
	if (!mm->hmm)
		mm->hmm = hmm;
	else
		cleanup = true;
	spin_unlock(&mm->page_table_lock);

	if (cleanup) {
		mmu_notifier_unregister(&hmm->mmu_notifier, mm);
		kfree(hmm);
	}

	return mm->hmm;
}

void hmm_mm_destroy(struct mm_struct *mm)
{
	kfree(mm->hmm);
}
static void hmm_invalidate_range(struct hmm *hmm,
				 enum hmm_update_type action,
				 unsigned long start,
				 unsigned long end)
{
	struct hmm_mirror *mirror;
	struct hmm_range *range;

	spin_lock(&hmm->lock);
	list_for_each_entry(range, &hmm->ranges, list) {
		unsigned long addr, idx, npages;

		if (end < range->start || start >= range->end)
			continue;

		range->valid = false;
		addr = max(start, range->start);
		idx = (addr - range->start) >> PAGE_SHIFT;
		npages = (min(range->end, end) - addr) >> PAGE_SHIFT;
		memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
	}
	spin_unlock(&hmm->lock);

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list)
		mirror->ops->sync_cpu_device_pagetables(mirror, action,
							start, end);
	up_read(&hmm->mirrors_sem);
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm_mirror *mirror;
	struct hmm *hmm = mm->hmm;

	down_write(&hmm->mirrors_sem);
	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
					  list);
	while (mirror) {
		list_del_init(&mirror->list);
		if (mirror->ops->release) {
			/*
			 * Drop mirrors_sem so callback can wait on any pending
			 * work that might itself trigger mmu_notifier callback
			 * and thus would deadlock with us.
			 */
			up_write(&hmm->mirrors_sem);
			mirror->ops->release(mirror);
			down_write(&hmm->mirrors_sem);
		}
		mirror = list_first_entry_or_null(&hmm->mirrors,
						  struct hmm_mirror, list);
	}
	up_write(&hmm->mirrors_sem);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end,
				      bool blockable)
{
	struct hmm *hmm = mm->hmm;

	VM_BUG_ON(!hmm);

	atomic_inc(&hmm->sequence);

	return 0;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct hmm *hmm = mm->hmm;

	VM_BUG_ON(!hmm);

	hmm_invalidate_range(mm->hmm, HMM_UPDATE_INVALIDATE, start, end);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};
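/*
 * Illustrative sketch (not part of this file): a mirror's
 * sync_cpu_device_pagetables() callback is where a driver reacts to the
 * invalidation broadcast by hmm_invalidate_range() above. The names
 * my_mirror and my_drop_device_ptes() below are hypothetical; assuming a
 * driver embeds struct hmm_mirror in its own state, it might look like:
 *
 *   static void my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *                                             enum hmm_update_type update,
 *                                             unsigned long start,
 *                                             unsigned long end)
 *   {
 *       struct my_mirror *m = container_of(mirror, struct my_mirror,
 *                                          mirror);
 *
 *       // Tear down any device mappings covering [start, end) so the
 *       // device re-faults and observes the new CPU page table.
 *       my_drop_device_ptes(m, start, end);
 *   }
 */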
/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

again:
	mirror->hmm = hmm_register(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	if (mirror->hmm->mm == NULL) {
		/*
		 * A racing hmm_mirror_unregister() is about to destroy the hmm
		 * struct. Try again to allocate a new one.
		 */
		up_write(&mirror->hmm->mirrors_sem);
		mirror->hmm = NULL;
		goto again;
	} else {
		list_add(&mirror->list, &mirror->hmm->mirrors);
		up_write(&mirror->hmm->mirrors_sem);
	}

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
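/*
 * Illustrative sketch (not part of this file): registering a mirror from a
 * hypothetical driver. my_mirror_ops and my_open() are assumed names; the
 * only hard requirement documented above is that mmap_sem is held for
 * write.
 *
 *   static const struct hmm_mirror_ops my_mirror_ops = {
 *       .sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
 *   };
 *
 *   static int my_open(struct my_mirror *m, struct mm_struct *mm)
 *   {
 *       int ret;
 *
 *       m->mirror.ops = &my_mirror_ops;
 *       down_write(&mm->mmap_sem);
 *       ret = hmm_mirror_register(&m->mirror, mm);
 *       up_write(&mm->mmap_sem);
 *       return ret;
 *   }
 */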
/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	bool should_unregister = false;
	struct mm_struct *mm;
	struct hmm *hmm;

	if (mirror->hmm == NULL)
		return;

	hmm = mirror->hmm;
	down_write(&hmm->mirrors_sem);
	list_del_init(&mirror->list);
	should_unregister = list_empty(&hmm->mirrors);
	mirror->hmm = NULL;
	mm = hmm->mm;
	hmm->mm = NULL;
	up_write(&hmm->mirrors_sem);

	if (!should_unregister || mm == NULL)
		return;

	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);
	kfree(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
	bool			fault;
	bool			block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EBUSY;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EAGAIN;
}
static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}

/*
 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Returns: 0 on success, -EAGAIN after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EAGAIN)
				return ret;
		}
	}

	return (fault || write_fault) ? -EAGAIN : 0;
}
static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	*fault = *write_fault = false;
	if (!hmm_vma_walk->fault)
		return;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
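/*
 * Worked example (illustrative, not part of this file): suppose the driver
 * requested write access for a page (pfns has HMM_PFN_VALID | HMM_PFN_WRITE
 * set) while the CPU pte is present read-only (cpu_flags has only
 * HMM_PFN_VALID). hmm_pte_need_fault() then skips the device-private
 * branch, computes *fault = false from the VALID check, but the WRITE check
 * flips both *fault and *write_fault to true, so the caller ends up in
 * hmm_vma_do_fault() with FAULT_FLAG_WRITE.
 */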
static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*fault) || (*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
	hmm_vma_walk->last = end;
	return 0;
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);

	if (pte_none(pte)) {
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry, ignore migration, use
		 * device and report anything else as error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_pfn_from_pfn(range, swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EAGAIN;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	if (fault || write_fault)
		goto fault;

	*pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;

	i = (addr - range->start) >> PAGE_SHIFT;

again:
	if (pmd_none(*pmdp))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(*pmdp) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(*pmdp) || pmd_trans_huge(*pmdp)) {
		pmd_t pmd;

		/*
		 * No need to take the pmd lock here: even if some other
		 * thread is splitting the huge pmd we will get that event
		 * through the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	if (pmd_bad(*pmdp))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}
static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

static void hmm_pfns_special(struct hmm_range *range)
{
	unsigned long addr = range->start, i = 0;

	for (; addr < range->end; addr += PAGE_SIZE, i++)
		range->pfns[i] = range->values[HMM_PFN_SPECIAL];
}

/*
 * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
 * @range: range being snapshotted
 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 * vma permission, 0 success
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by range struct. See hmm_vma_range_done() for further
 * information.
 *
 * The range struct is initialized here. It tracks the CPU page table, but only
 * if the function returns success (0), in which case the caller must then call
 * hmm_vma_range_done() to stop CPU page table update tracking on this range.
 *
 * NOT CALLING hmm_vma_range_done() IF FUNCTION RETURNS 0 WILL LEAD TO SERIOUS
 * MEMORY CORRUPTION ! YOU HAVE BEEN WARNED !
 */
int hmm_vma_get_pfns(struct hmm_range *range)
{
	struct vm_area_struct *vma = range->vma;
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;
	struct hmm *hmm;

	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
		return -EINVAL;
	if (range->end < vma->vm_start || range->end > vma->vm_end)
		return -EINVAL;

	hmm = hmm_register(vma->vm_mm);
	if (!hmm)
		return -ENOMEM;
	/* Caller must have registered a mirror, via hmm_mirror_register() ! */
	if (!hmm->mmu_notifier.ops)
		return -EINVAL;

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
			vma_is_dax(vma)) {
		hmm_pfns_special(range);
		return -EINVAL;
	}

	if (!(vma->vm_flags & VM_READ)) {
		/*
		 * If the vma does not allow read access, then assume that it
		 * does not allow write access either. Architectures that
		 * allow write without read access are not supported by HMM,
		 * because operations such as atomic access would not work.
		 */
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -EPERM;
	}

	/* Initialize range to track CPU page table update */
	spin_lock(&hmm->lock);
	range->valid = true;
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	hmm_vma_walk.fault = false;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;

	mm_walk.vma = vma;
	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

	walk_page_range(range->start, range->end, &mm_walk);
	return 0;
}
EXPORT_SYMBOL(hmm_vma_get_pfns);
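/*
 * Illustrative sketch (not part of this file): a minimal snapshot of one
 * page with hmm_vma_get_pfns(). The stack-allocated range and single-page
 * span are assumptions made for brevity; real drivers snapshot larger
 * ranges and follow one of the patterns documented at hmm_vma_range_done()
 * below.
 *
 *   uint64_t pfn;
 *   struct hmm_range range = {
 *       .vma    = vma,              // vma covering addr
 *       .start  = addr,             // page aligned
 *       .end    = addr + PAGE_SIZE,
 *       .pfns   = &pfn,             // one entry per page in the range
 *       .flags  = driver_flags,     // driver's HMM_PFN_* bit encoding
 *       .values = driver_values,
 *   };
 *
 *   if (hmm_vma_get_pfns(&range))
 *       return;                     // error, nothing to undo
 *   // ... build device mapping from pfn ...
 *   if (!hmm_vma_range_done(&range))
 *       goto retry;                 // snapshot was invalidated
 */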
/*
 * hmm_vma_range_done() - stop tracking change to CPU page table over a range
 * @range: range being tracked
 * Returns: false if range data has been invalidated, true otherwise
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * either hmm_vma_get_pfns() or hmm_vma_fault(). Once the device driver is done
 * using the data, or wants to lock updates to the data it got from those
 * functions, it must call the hmm_vma_range_done() function, which will then
 * stop tracking CPU page table updates.
 *
 * Note that device driver must still implement general CPU page table update
 * tracking either by using hmm_mirror (see hmm_mirror_register()) or by using
 * the mmu_notifier API directly.
 *
 * CPU page table update tracking done through hmm_range is only temporary and
 * to be used while trying to duplicate CPU page table contents for a range of
 * virtual addresses.
 *
 * There are two ways to use this:
 * again:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   trans = device_build_page_table_update_transaction(pfns);
 *   device_page_table_lock();
 *   if (!hmm_vma_range_done(range)) {
 *     device_page_table_unlock();
 *     goto again;
 *   }
 *   device_commit_transaction(trans);
 *   device_page_table_unlock();
 *
 * Or:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   device_page_table_lock();
 *   hmm_vma_range_done(range);
 *   device_update_page_table(range->pfns);
 *   device_page_table_unlock();
 */
bool hmm_vma_range_done(struct hmm_range *range)
{
	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
	struct hmm *hmm;

	if (range->end <= range->start) {
		BUG();
		return false;
	}

	hmm = hmm_register(range->vma->vm_mm);
	if (!hmm) {
		memset(range->pfns, 0, sizeof(*range->pfns) * npages);
		return false;
	}

	spin_lock(&hmm->lock);
	list_del_rcu(&range->list);
	spin_unlock(&hmm->lock);

	return range->valid;
}
EXPORT_SYMBOL(hmm_vma_range_done);
/*
 * hmm_vma_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop
 * mmap_sem)
 * Returns: 0 success, error otherwise (-EAGAIN means mmap_sem has been
 * dropped)
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 *
 * Expected use pattern:
 * retry:
 *   down_read(&mm->mmap_sem);
 *   // Find vma and address device wants to fault, initialize hmm_pfn_t
 *   // array accordingly
 *   ret = hmm_vma_fault(range, block);
 *   switch (ret) {
 *   case -EAGAIN:
 *     hmm_vma_range_done(range);
 *     // You might want to rate limit or yield to play nicely, you may
 *     // also commit any valid pfn in the array assuming that you are
 *     // getting true from hmm_vma_range_done()
 *     goto retry;
 *   case 0:
 *     break;
 *   case -ENOMEM:
 *   case -EINVAL:
 *   case -EPERM:
 *   default:
 *     // Handle error !
 *     up_read(&mm->mmap_sem)
 *     return;
 *   }
 *   // Take device driver lock that serializes device page table update
 *   driver_lock_device_page_table_update();
 *   hmm_vma_range_done(range);
 *   // Commit pfns we got from hmm_vma_fault()
 *   driver_unlock_device_page_table_update();
 *   up_read(&mm->mmap_sem)
 *
 * YOU MUST CALL hmm_vma_range_done() AFTER THIS FUNCTION RETURNS SUCCESS (0)
 * BEFORE FREEING THE range struct OR YOU WILL HAVE SERIOUS MEMORY CORRUPTION !
 *
 * YOU HAVE BEEN WARNED !
 */
int hmm_vma_fault(struct hmm_range *range, bool block)
{
	struct vm_area_struct *vma = range->vma;
	unsigned long start = range->start;
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;
	struct hmm *hmm;
	int ret;

	/* Sanity check, this really should not happen ! */
	if (range->start < vma->vm_start || range->start >= vma->vm_end)
		return -EINVAL;
	if (range->end < vma->vm_start || range->end > vma->vm_end)
		return -EINVAL;

	hmm = hmm_register(vma->vm_mm);
	if (!hmm) {
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -ENOMEM;
	}
	/* Caller must have registered a mirror using hmm_mirror_register() */
	if (!hmm->mmu_notifier.ops)
		return -EINVAL;

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
			vma_is_dax(vma)) {
		hmm_pfns_special(range);
		return -EINVAL;
	}

	if (!(vma->vm_flags & VM_READ)) {
		/*
		 * If the vma does not allow read access, then assume that it
		 * does not allow write access either. Architectures that
		 * allow write without read access are not supported by HMM,
		 * because operations such as atomic access would not work.
		 */
		hmm_pfns_clear(range, range->pfns, range->start, range->end);
		return -EPERM;
	}

	/* Initialize range to track CPU page table update */
	spin_lock(&hmm->lock);
	range->valid = true;
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	hmm_vma_walk.fault = true;
	hmm_vma_walk.block = block;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;
	hmm_vma_walk.last = range->start;

	mm_walk.vma = vma;
	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

	do {
		ret = walk_page_range(start, range->end, &mm_walk);
		start = hmm_vma_walk.last;
	} while (ret == -EAGAIN);

	if (ret) {
		unsigned long i;

		i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
		hmm_pfns_clear(range, &range->pfns[i], hmm_vma_walk.last,
			       range->end);
		hmm_vma_range_done(range);
	}
	return ret;
}
EXPORT_SYMBOL(hmm_vma_fault);
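/*
 * Illustrative sketch (not part of this file): the retry loop from the
 * "Expected use pattern" above, written out concretely. my_commit_pfns()
 * and the device page table lock are hypothetical driver pieces; range is
 * assumed to be initialized as for hmm_vma_get_pfns().
 *
 *   retry:
 *       down_read(&mm->mmap_sem);
 *       ret = hmm_vma_fault(range, true);
 *       if (ret == -EAGAIN) {
 *           hmm_vma_range_done(range);  // -EAGAIN dropped mmap_sem
 *           goto retry;
 *       }
 *       if (ret) {
 *           up_read(&mm->mmap_sem);
 *           return ret;                 // -ENOMEM, -EINVAL, -EPERM, ...
 *       }
 *       my_lock_device_page_table();
 *       if (hmm_vma_range_done(range))
 *           my_commit_pfns(range->pfns); // snapshot still valid
 *       my_unlock_device_page_table();
 *       up_read(&mm->mmap_sem);
 */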
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);
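/*
 * Illustrative sketch (not part of this file): hmm_vma_alloc_locked_page()
 * gives a driver a locked system page, e.g. as the destination when copying
 * data out of device memory during a CPU fault on a device-private page.
 * The surrounding migration machinery is elided; my_copy_from_device() is a
 * hypothetical helper.
 *
 *   struct page *page;
 *
 *   page = hmm_vma_alloc_locked_page(vma, addr);
 *   if (!page)
 *       return VM_FAULT_OOM;
 *   my_copy_from_device(devmem, device_page, page);
 *   // page stays locked until the migration/fault path installs it.
 */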
static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	complete(&devmem->completion);
}

static void hmm_devmem_ref_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	percpu_ref_exit(ref);
	devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
}

static void hmm_devmem_ref_kill(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	percpu_ref_kill(ref);
	wait_for_completion(&devmem->completion);
	devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
}

static int hmm_devmem_fault(struct vm_area_struct *vma,
			    unsigned long addr,
			    const struct page *page,
			    unsigned int flags,
			    pmd_t *pmdp)
{
	struct hmm_devmem *devmem = page->pgmap->data;

	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}

static void hmm_devmem_free(struct page *page, void *data)
{
	struct hmm_devmem *devmem = data;

	devmem->ops->free(devmem, page);
}

static DEFINE_MUTEX(hmm_devmem_lock);
static RADIX_TREE(hmm_devmem_radix, GFP_KERNEL);

static void hmm_devmem_radix_release(struct resource *resource)
{
	resource_size_t key;

	mutex_lock(&hmm_devmem_lock);
	for (key = resource->start;
	     key <= resource->end;
	     key += PA_SECTION_SIZE)
		radix_tree_delete(&hmm_devmem_radix, key >> PA_SECTION_SHIFT);
	mutex_unlock(&hmm_devmem_lock);
}

static void hmm_devmem_release(struct device *dev, void *data)
{
	struct hmm_devmem *devmem = data;
	struct resource *resource = devmem->resource;
	unsigned long start_pfn, npages;
	struct zone *zone;
	struct page *page;

	if (percpu_ref_tryget_live(&devmem->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(&devmem->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
	npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;

	page = pfn_to_page(start_pfn);
	zone = page_zone(page);

	mem_hotplug_begin();
	if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY)
		__remove_pages(zone, start_pfn, npages, NULL);
	else
		arch_remove_memory(start_pfn << PAGE_SHIFT,
				   npages << PAGE_SHIFT, NULL);
	mem_hotplug_done();

	hmm_devmem_radix_release(resource);
}

static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
{
	resource_size_t key, align_start, align_size, align_end;
	struct device *device = devmem->device;
	int ret, nid, is_ram;
	unsigned long pfn;

	align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1);
	align_size = ALIGN(devmem->resource->start +
			   resource_size(devmem->resource),
			   PA_SECTION_SIZE) - align_start;

	is_ram = region_intersects(align_start, align_size,
				   IORESOURCE_SYSTEM_RAM,
				   IORES_DESC_NONE);
	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
			  __func__, devmem->resource);
		return -ENXIO;
	}
	if (is_ram == REGION_INTERSECTS)
		return -ENXIO;

	if (devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY)
		devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
	else
		devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;

	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_fault = hmm_devmem_fault;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.dev = devmem->device;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;

	mutex_lock(&hmm_devmem_lock);
	align_end = align_start + align_size - 1;
	for (key = align_start; key <= align_end; key += PA_SECTION_SIZE) {
		struct hmm_devmem *dup;

		dup = radix_tree_lookup(&hmm_devmem_radix,
					key >> PA_SECTION_SHIFT);
		if (dup) {
			dev_err(device, "%s: collides with mapping for %s\n",
				__func__, dev_name(dup->device));
			mutex_unlock(&hmm_devmem_lock);
			ret = -EBUSY;
			goto error;
		}
		ret = radix_tree_insert(&hmm_devmem_radix,
					key >> PA_SECTION_SHIFT,
					devmem);
		if (ret) {
			dev_err(device, "%s: failed: %d\n", __func__, ret);
			mutex_unlock(&hmm_devmem_lock);
			goto error_radix;
		}
	}
	mutex_unlock(&hmm_devmem_lock);

	nid = dev_to_node(device);
	if (nid < 0)
		nid = numa_mem_id();

	mem_hotplug_begin();
	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory. Moreover
	 * the device memory is inaccessible, thus we do not want to create a
	 * linear mapping for the memory like arch_add_memory() would do.
	 *
	 * For device public memory, which is accessible by the CPU, we do
	 * want the linear mapping and thus use arch_add_memory().
	 */
	if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC)
		ret = arch_add_memory(nid, align_start, align_size, NULL,
				      false);
	else
		ret = add_pages(nid, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, NULL, false);
	if (ret) {
		mem_hotplug_done();
		goto error_add_memory;
	}
	move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
			       align_start >> PAGE_SHIFT,
			       align_size >> PAGE_SHIFT, NULL);
	mem_hotplug_done();

	for (pfn = devmem->pfn_first; pfn < devmem->pfn_last; pfn++) {
		struct page *page = pfn_to_page(pfn);

		page->pgmap = &devmem->pagemap;
	}
	return 0;

error_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
error_radix:
	hmm_devmem_radix_release(devmem->resource);
error:
	return ret;
}

static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
{
	struct hmm_devmem *devmem = data;

	return devmem->resource == match_data;
}

static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
{
	devres_release(devmem->device, &hmm_devmem_release,
		       &hmm_devmem_match, devmem->resource);
}

/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to new hmm_devmem struct on success, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical addresses big enough
 * to contain the new resource, and then hotplugs it as ZONE_DEVICE memory,
 * which in turn allocates struct pages. It does not do anything beyond that;
 * all events affecting the memory will go through the various callbacks
 * provided by the hmm_devmem_ops struct.
 *
 * The device driver should call this function during device initialization
 * and is then responsible for the memory management. HMM only provides
 * helpers.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size)
{
	struct hmm_devmem *devmem;
	resource_size_t addr;
	int ret;

	dev_pagemap_get_ops();

	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
				   GFP_KERNEL, dev_to_node(device));
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = NULL;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		goto error_percpu_ref;

	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
	if (ret)
		goto error_devm_add_action;

	size = ALIGN(size, PA_SECTION_SIZE);
	addr = min((unsigned long)iomem_resource.end,
		   (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = addr - size + 1UL;

	/*
	 * FIXME add a new helper to quickly walk resource tree and find free
	 * range
	 *
	 * FIXME what about ioport_resource resource ?
	 */
	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
		if (ret != REGION_DISJOINT)
			continue;

		devmem->resource = devm_request_mem_region(device, addr, size,
							   dev_name(device));
		if (!devmem->resource) {
			ret = -ENOMEM;
			goto error_no_resource;
		}
		break;
	}
	if (!devmem->resource) {
		ret = -ERANGE;
		goto error_no_resource;
	}

	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);

	ret = hmm_devmem_pages_create(devmem);
	if (ret)
		goto error_pages;

	devres_add(device, devmem);

	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
	if (ret) {
		hmm_devmem_remove(devmem);
		return ERR_PTR(ret);
	}

	return devmem;

error_pages:
	devm_release_mem_region(device, devmem->resource->start,
				resource_size(devmem->resource));
error_no_resource:
error_devm_add_action:
	hmm_devmem_ref_kill(&devmem->ref);
	hmm_devmem_ref_exit(&devmem->ref);
error_percpu_ref:
	devres_free(devmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(hmm_devmem_add);
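
/*
 * Editor's sketch of a typical caller (hypothetical driver code:
 * my_devmem_ops, my_device and MY_DEVICE_MEMORY_SIZE are all
 * driver-defined, not part of HMM):
 */
#if 0	/* illustration only, never compiled */
static int my_driver_init_memory(struct device *my_device)
{
	struct hmm_devmem *devmem;

	/* Reserve and hotplug MY_DEVICE_MEMORY_SIZE bytes of device
	 * private memory; events are delivered via my_devmem_ops. */
	devmem = hmm_devmem_add(&my_devmem_ops, my_device,
				MY_DEVICE_MEMORY_SIZE);
	if (IS_ERR(devmem))
		return PTR_ERR(devmem);

	/* Device pages now span [devmem->pfn_first, devmem->pfn_last). */
	return 0;
}
#endif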

struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res)
{
	struct hmm_devmem *devmem;
	int ret;

	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
		return ERR_PTR(-EINVAL);

	dev_pagemap_get_ops();

	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
				   GFP_KERNEL, dev_to_node(device));
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = res;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		goto error_percpu_ref;

	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
	if (ret)
		goto error_devm_add_action;

	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);

	ret = hmm_devmem_pages_create(devmem);
	if (ret)
		goto error_devm_add_action;

	devres_add(device, devmem);

	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
	if (ret) {
		hmm_devmem_remove(devmem);
		return ERR_PTR(ret);
	}

	return devmem;

error_devm_add_action:
	hmm_devmem_ref_kill(&devmem->ref);
	hmm_devmem_ref_exit(&devmem->ref);
error_percpu_ref:
	devres_free(devmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(hmm_devmem_add_resource);
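
/*
 * Editor's note: unlike hmm_devmem_add(), the caller supplies an already
 * reserved physical resource, which must be marked
 * IORES_DESC_DEVICE_PUBLIC_MEMORY (CPU-accessible device memory such as
 * CDM); the helper only wraps it with struct pages. A minimal sketch
 * (my_devmem_ops and the res argument are assumed driver-provided):
 */
#if 0	/* illustration only, never compiled */
static int my_driver_init_public_memory(struct device *my_device,
					struct resource *res)
{
	struct hmm_devmem *devmem;

	devmem = hmm_devmem_add_resource(&my_devmem_ops, my_device, res);
	if (IS_ERR(devmem))
		return PTR_ERR(devmem);
	return 0;
}
#endif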

/*
 * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE)
 *
 * @devmem: hmm_devmem struct used to track and manage the ZONE_DEVICE memory
 *
 * This will hot-unplug memory that was hotplugged by hmm_devmem_add() on
 * behalf of the device driver. It will free struct page and remove the
 * resource that reserved the physical address range for this device memory.
 */
void hmm_devmem_remove(struct hmm_devmem *devmem)
{
	resource_size_t start, size;
	struct device *device;
	bool cdm = false;

	if (!devmem)
		return;

	device = devmem->device;
	start = devmem->resource->start;
	size = resource_size(devmem->resource);

	cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY;
	hmm_devmem_ref_kill(&devmem->ref);
	hmm_devmem_ref_exit(&devmem->ref);
	hmm_devmem_pages_remove(devmem);

	if (!cdm)
		devm_release_mem_region(device, start, size);
}
EXPORT_SYMBOL(hmm_devmem_remove);
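
/*
 * Editor's sketch of the matching teardown (hypothetical driver code;
 * note that for public/CDM memory the resource itself is left alone,
 * per the !cdm check above):
 */
#if 0	/* illustration only, never compiled */
static void my_driver_fini_memory(struct hmm_devmem *devmem)
{
	/* Hot-unplug the ZONE_DEVICE pages; for private memory this also
	 * releases the physical range reserved by hmm_devmem_add(). */
	hmm_devmem_remove(devmem);
}
#endif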

/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper;
 * it is not needed in order to use any other HMM functionality.
 */
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;

static void hmm_device_release(struct device *device)
{
	struct hmm_device *hmm_device;

	hmm_device = container_of(device, struct hmm_device, device);
	spin_lock(&hmm_device_lock);
	clear_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	kfree(hmm_device);
}

struct hmm_device *hmm_device_new(void *drvdata)
{
	struct hmm_device *hmm_device;

	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
	if (!hmm_device)
		return ERR_PTR(-ENOMEM);

	spin_lock(&hmm_device_lock);
	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
	if (hmm_device->minor >= HMM_DEVICE_MAX) {
		spin_unlock(&hmm_device_lock);
		kfree(hmm_device);
		return ERR_PTR(-EBUSY);
	}
	set_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
					hmm_device->minor);
	hmm_device->device.release = hmm_device_release;
	dev_set_drvdata(&hmm_device->device, drvdata);
	hmm_device->device.class = hmm_device_class;
	device_initialize(&hmm_device->device);

	return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);

void hmm_device_put(struct hmm_device *hmm_device)
{
	put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);
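
/*
 * Editor's sketch of using the fake-device helper (hypothetical driver
 * code; my_drvdata and my_devmem_ops are assumptions). The embedded
 * struct device can then be handed to hmm_devmem_add():
 */
#if 0	/* illustration only, never compiled */
static struct hmm_device *my_hmm_device;

static int my_driver_setup_fake_device(void *my_drvdata)
{
	my_hmm_device = hmm_device_new(my_drvdata);
	if (IS_ERR(my_hmm_device))
		return PTR_ERR(my_hmm_device);
	/* Anchor memory from several physical devices on one device:
	 * hmm_devmem_add(&my_devmem_ops, &my_hmm_device->device, size); */
	return 0;
}
#endif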

static int __init hmm_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&hmm_device_devt, 0,
				  HMM_DEVICE_MAX,
				  "hmm_device");
	if (ret)
		return ret;

	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
	if (IS_ERR(hmm_device_class)) {
		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
		return PTR_ERR(hmm_device_class);
	}
	return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */