Lines matching +full:device +full:- +full:addr (search hits in the kernel's HMM test driver, lib/test_hmm.c). Each entry below shows the original source line number, the matched text, and the enclosing function or declaration.
1 // SPDX-License-Identifier: GPL-2.0
4 * mirror and zone device private memory migration APIs of the kernel.
6 * space and can use the device to read/write any valid virtual address.
14 #include <linux/device.h>
42 * representing a piece of device memory. dmirror_devmem_alloc_page
44 * real device. zone_device_data points to that backing page. But
46 * physical CPU-accessible memory that we can use directly.
49 (page)->zone_device_data : (page))
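The line above is the tail of the backing-page helper described by the preceding comment. A minimal sketch of such a macro, assuming the behaviour the comment describes (device-private pages keep their real backing page in zone_device_data, coherent pages are CPU-accessible as-is); the macro name and exact form are assumptions:

	/* Sketch: resolve the CPU-accessible page backing a ZONE_DEVICE page. */
	#define BACKING_PAGE(page) (is_device_private_page((page)) ? \
				   (page)->zone_device_data : (page))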
54 …start address for SPM (special purpose memory) used for device 0. By setting this Coherent device …
59 …start address for SPM (special purpose memory) used for device 1. By setting this Coherent device …
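The two truncated lines above are module parameter help strings. A hedged sketch of how such parameters are typically declared; the type, permission bits, and abbreviated help text are assumptions, and the truncated portions of the original strings are not reproduced:

	#include <linux/module.h>

	/*
	 * Sketch: optional SPM base addresses. When set, the corresponding
	 * test device exposes coherent (CPU-addressable) device memory
	 * instead of faked device-private memory.
	 */
	static unsigned long spm_addr_dev0;
	module_param(spm_addr_dev0, ulong, 0644);
	MODULE_PARM_DESC(spm_addr_dev0,
		"start address for SPM (special purpose memory) used for device 0");

	static unsigned long spm_addr_dev1;
	module_param(spm_addr_dev1, ulong, 0644);
	MODULE_PARM_DESC(spm_addr_dev1,
		"start address for SPM (special purpose memory) used for device 1");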
70 unsigned long addr; member
87 * Data attached to the open device file.
98 * ZONE_DEVICE pages for migration and simulating device memory.
107 * Per device data.
112 struct device device; member
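The "Per device data" comment and the struct device member above belong to the driver's per-device state. A hedged sketch of that structure, with field names taken from other hits in this listing (devmem_lock, devmem_chunks, free_pages, calloc/cfree) and the exact types assumed; it needs <linux/cdev.h> and <linux/device.h>:

	struct dmirror_device {
		struct cdev		cdevice;	  /* character device node */
		unsigned int		zone_device_type; /* device private vs. coherent */
		struct device		device;

		unsigned int		devmem_capacity;  /* slots in devmem_chunks */
		unsigned int		devmem_count;	  /* chunks currently allocated */
		struct dmirror_chunk	**devmem_chunks;  /* simulated device memory */
		struct mutex		devmem_lock;	  /* protects the three above */

		unsigned long		calloc;		  /* pages taken from free list */
		unsigned long		cfree;		  /* pages returned to free list */
		struct page		*free_pages;	  /* linked via zone_device_data */
		spinlock_t		lock;		  /* protects free list/counters */
	};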
128 unsigned long addr, in dmirror_bounce_init() argument
131 bounce->addr = addr; in dmirror_bounce_init()
132 bounce->size = size; in dmirror_bounce_init()
133 bounce->cpages = 0; in dmirror_bounce_init()
134 bounce->ptr = vmalloc(size); in dmirror_bounce_init()
135 if (!bounce->ptr) in dmirror_bounce_init()
136 return -ENOMEM; in dmirror_bounce_init()
142 return (mdevice->zone_device_type == in dmirror_is_private_zone()
149 return (dmirror->mdevice->zone_device_type == in dmirror_select_device()
157 vfree(bounce->ptr); in dmirror_bounce_fini()
162 struct cdev *cdev = inode->i_cdev; in dmirror_fops_open()
169 return -ENOMEM; in dmirror_fops_open()
171 dmirror->mdevice = container_of(cdev, struct dmirror_device, cdevice); in dmirror_fops_open()
172 mutex_init(&dmirror->mutex); in dmirror_fops_open()
173 xa_init(&dmirror->pt); in dmirror_fops_open()
175 ret = mmu_interval_notifier_insert(&dmirror->notifier, current->mm, in dmirror_fops_open()
182 filp->private_data = dmirror; in dmirror_fops_open()
188 struct dmirror *dmirror = filp->private_data; in dmirror_fops_release()
190 mmu_interval_notifier_remove(&dmirror->notifier); in dmirror_fops_release()
191 xa_destroy(&dmirror->pt); in dmirror_fops_release()
205 return dmirror_page_to_chunk(page)->mdevice; in dmirror_page_to_device()
210 unsigned long *pfns = range->hmm_pfns; in dmirror_do_fault()
213 for (pfn = (range->start >> PAGE_SHIFT); in dmirror_do_fault()
214 pfn < (range->end >> PAGE_SHIFT); in dmirror_do_fault()
232 else if (WARN_ON(range->default_flags & HMM_PFN_WRITE)) in dmirror_do_fault()
233 return -EFAULT; in dmirror_do_fault()
234 entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC); in dmirror_do_fault()
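The fault fragments above populate the driver's "device page table", which is simply an xarray keyed by virtual page index. A hedged, simplified sketch of that loop; the helper name is hypothetical (hence the _sketch suffix), the DPT_XA_TAG_WRITE tag constant is assumed from the driver, and the original's WARN_ON sanity checks are omitted:

	static int dmirror_do_fault_sketch(struct dmirror *dmirror,
					   struct hmm_range *range)
	{
		const unsigned long *pfns = range->hmm_pfns;
		unsigned long pfn;

		for (pfn = range->start >> PAGE_SHIFT;
		     pfn < (range->end >> PAGE_SHIFT); pfn++, pfns++) {
			void *entry;

			if (!(*pfns & HMM_PFN_VALID))
				continue;	/* nothing faulted in for this index */

			/* Store the struct page, tagged when the CPU PTE is writable. */
			entry = hmm_pfn_to_page(*pfns);
			if (*pfns & HMM_PFN_WRITE)
				entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE);

			entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
			if (xa_is_err(entry))
				return xa_err(entry);
		}
		return 0;
	}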
253 xa_for_each_range(&dmirror->pt, pfn, entry, start >> PAGE_SHIFT, in dmirror_do_update()
255 xa_erase(&dmirror->pt, pfn); in dmirror_do_update()
265 * Ignore invalidation callbacks for device private pages since in dmirror_interval_invalidate()
268 if (range->event == MMU_NOTIFY_MIGRATE && in dmirror_interval_invalidate()
269 range->owner == dmirror->mdevice) in dmirror_interval_invalidate()
273 mutex_lock(&dmirror->mutex); in dmirror_interval_invalidate()
274 else if (!mutex_trylock(&dmirror->mutex)) in dmirror_interval_invalidate()
278 dmirror_do_update(dmirror, range->start, range->end); in dmirror_interval_invalidate()
280 mutex_unlock(&dmirror->mutex); in dmirror_interval_invalidate()
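Assembled from the fragments above, the invalidation callback follows the standard mmu_interval_notifier shape: skip invalidations caused by this device's own migrations, honor non-blocking contexts, bump the sequence count, then drop the affected range from the mirror. A hedged sketch (the ops struct name is an assumption):

	static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
						const struct mmu_notifier_range *range,
						unsigned long cur_seq)
	{
		struct dmirror *dmirror = container_of(mni, struct dmirror, notifier);

		/*
		 * Ignore invalidations raised by this device's own migrations;
		 * the device page table is updated as part of the migration itself.
		 */
		if (range->event == MMU_NOTIFY_MIGRATE &&
		    range->owner == dmirror->mdevice)
			return true;

		if (mmu_notifier_range_blockable(range))
			mutex_lock(&dmirror->mutex);
		else if (!mutex_trylock(&dmirror->mutex))
			return false;	/* caller cannot sleep; ask it to retry */

		mmu_interval_set_seq(mni, cur_seq);
		dmirror_do_update(dmirror, range->start, range->end);

		mutex_unlock(&dmirror->mutex);
		return true;
	}

	static const struct mmu_interval_notifier_ops dmirror_min_ops = {
		.invalidate = dmirror_interval_invalidate,
	};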
291 struct mm_struct *mm = dmirror->notifier.mm; in dmirror_range_fault()
298 ret = -EBUSY; in dmirror_range_fault()
302 range->notifier_seq = mmu_interval_read_begin(range->notifier); in dmirror_range_fault()
307 if (ret == -EBUSY) in dmirror_range_fault()
312 mutex_lock(&dmirror->mutex); in dmirror_range_fault()
313 if (mmu_interval_read_retry(range->notifier, in dmirror_range_fault()
314 range->notifier_seq)) { in dmirror_range_fault()
315 mutex_unlock(&dmirror->mutex); in dmirror_range_fault()
323 mutex_unlock(&dmirror->mutex); in dmirror_range_fault()
331 struct mm_struct *mm = dmirror->notifier.mm; in dmirror_fault()
332 unsigned long addr; in dmirror_fault() local
335 .notifier = &dmirror->notifier, in dmirror_fault()
340 .dev_private_owner = dmirror->mdevice, in dmirror_fault()
348 for (addr = start; addr < end; addr = range.end) { in dmirror_fault()
349 range.start = addr; in dmirror_fault()
350 range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end); in dmirror_fault()
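The two fault functions above implement the hmm_range_fault() retry protocol from Documentation/mm/hmm.rst: read the notifier sequence, fault the range under mmap_read_lock(), then, holding the driver lock, check mmu_interval_read_retry() before committing anything to the mirror. A hedged sketch of that loop; the function name is hypothetical and details are abbreviated:

	static int dmirror_range_fault_sketch(struct dmirror *dmirror,
					      struct hmm_range *range)
	{
		struct mm_struct *mm = dmirror->notifier.mm;
		unsigned long timeout =
			jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
		int ret;

		while (true) {
			if (time_after(jiffies, timeout))
				return -EBUSY;

			range->notifier_seq = mmu_interval_read_begin(range->notifier);
			mmap_read_lock(mm);
			ret = hmm_range_fault(range);
			mmap_read_unlock(mm);
			if (ret) {
				if (ret == -EBUSY)
					continue;	/* raced with an invalidation */
				return ret;
			}

			mutex_lock(&dmirror->mutex);
			if (mmu_interval_read_retry(range->notifier,
						    range->notifier_seq)) {
				/* Invalidated after the fault; try again. */
				mutex_unlock(&dmirror->mutex);
				continue;
			}
			break;
		}

		/* dmirror->mutex held: safe to copy range->hmm_pfns into the mirror. */
		ret = dmirror_do_fault(dmirror, range);
		mutex_unlock(&dmirror->mutex);
		return ret;
	}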
367 ptr = bounce->ptr + ((start - bounce->addr) & PAGE_MASK); in dmirror_do_read()
373 entry = xa_load(&dmirror->pt, pfn); in dmirror_do_read()
376 return -ENOENT; in dmirror_do_read()
381 bounce->cpages++; in dmirror_do_read()
391 unsigned long size = cmd->npages << PAGE_SHIFT; in dmirror_read()
394 start = cmd->addr; in dmirror_read()
397 return -EINVAL; in dmirror_read()
404 mutex_lock(&dmirror->mutex); in dmirror_read()
406 mutex_unlock(&dmirror->mutex); in dmirror_read()
407 if (ret != -ENOENT) in dmirror_read()
410 start = cmd->addr + (bounce.cpages << PAGE_SHIFT); in dmirror_read()
414 cmd->faults++; in dmirror_read()
418 if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr, in dmirror_read()
420 ret = -EFAULT; in dmirror_read()
422 cmd->cpages = bounce.cpages; in dmirror_read()
433 ptr = bounce->ptr + ((start - bounce->addr) & PAGE_MASK); in dmirror_do_write()
439 entry = xa_load(&dmirror->pt, pfn); in dmirror_do_write()
442 return -ENOENT; in dmirror_do_write()
447 bounce->cpages++; in dmirror_do_write()
457 unsigned long size = cmd->npages << PAGE_SHIFT; in dmirror_write()
460 start = cmd->addr; in dmirror_write()
463 return -EINVAL; in dmirror_write()
468 if (copy_from_user(bounce.ptr, u64_to_user_ptr(cmd->ptr), in dmirror_write()
470 ret = -EFAULT; in dmirror_write()
475 mutex_lock(&dmirror->mutex); in dmirror_write()
477 mutex_unlock(&dmirror->mutex); in dmirror_write()
478 if (ret != -ENOENT) in dmirror_write()
481 start = cmd->addr + (bounce.cpages << PAGE_SHIFT); in dmirror_write()
485 cmd->faults++; in dmirror_write()
489 cmd->cpages = bounce.cpages; in dmirror_write()
503 int ret = -ENOMEM; in dmirror_allocate_chunk()
509 switch (mdevice->zone_device_type) { in dmirror_allocate_chunk()
515 devmem->pagemap.range.start = res->start; in dmirror_allocate_chunk()
516 devmem->pagemap.range.end = res->end; in dmirror_allocate_chunk()
517 devmem->pagemap.type = MEMORY_DEVICE_PRIVATE; in dmirror_allocate_chunk()
520 devmem->pagemap.range.start = (MINOR(mdevice->cdevice.dev) - 2) ? in dmirror_allocate_chunk()
523 devmem->pagemap.range.end = devmem->pagemap.range.start + in dmirror_allocate_chunk()
524 DEVMEM_CHUNK_SIZE - 1; in dmirror_allocate_chunk()
525 devmem->pagemap.type = MEMORY_DEVICE_COHERENT; in dmirror_allocate_chunk()
528 ret = -EINVAL; in dmirror_allocate_chunk()
532 devmem->pagemap.nr_range = 1; in dmirror_allocate_chunk()
533 devmem->pagemap.ops = &dmirror_devmem_ops; in dmirror_allocate_chunk()
534 devmem->pagemap.owner = mdevice; in dmirror_allocate_chunk()
536 mutex_lock(&mdevice->devmem_lock); in dmirror_allocate_chunk()
538 if (mdevice->devmem_count == mdevice->devmem_capacity) { in dmirror_allocate_chunk()
542 new_capacity = mdevice->devmem_capacity + in dmirror_allocate_chunk()
544 new_chunks = krealloc(mdevice->devmem_chunks, in dmirror_allocate_chunk()
549 mdevice->devmem_capacity = new_capacity; in dmirror_allocate_chunk()
550 mdevice->devmem_chunks = new_chunks; in dmirror_allocate_chunk()
552 ptr = memremap_pages(&devmem->pagemap, numa_node_id()); in dmirror_allocate_chunk()
557 ret = -EFAULT; in dmirror_allocate_chunk()
561 devmem->mdevice = mdevice; in dmirror_allocate_chunk()
562 pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT; in dmirror_allocate_chunk()
563 pfn_last = pfn_first + (range_len(&devmem->pagemap.range) >> PAGE_SHIFT); in dmirror_allocate_chunk()
564 mdevice->devmem_chunks[mdevice->devmem_count++] = devmem; in dmirror_allocate_chunk()
566 mutex_unlock(&mdevice->devmem_lock); in dmirror_allocate_chunk()
570 mdevice->devmem_count, in dmirror_allocate_chunk()
571 mdevice->devmem_count * (DEVMEM_CHUNK_SIZE / (1024 * 1024)), in dmirror_allocate_chunk()
574 spin_lock(&mdevice->lock); in dmirror_allocate_chunk()
578 page->zone_device_data = mdevice->free_pages; in dmirror_allocate_chunk()
579 mdevice->free_pages = page; in dmirror_allocate_chunk()
582 *ppage = mdevice->free_pages; in dmirror_allocate_chunk()
583 mdevice->free_pages = (*ppage)->zone_device_data; in dmirror_allocate_chunk()
584 mdevice->calloc++; in dmirror_allocate_chunk()
586 spin_unlock(&mdevice->lock); in dmirror_allocate_chunk()
591 mutex_unlock(&mdevice->devmem_lock); in dmirror_allocate_chunk()
592 if (res && devmem->pagemap.type == MEMORY_DEVICE_PRIVATE) in dmirror_allocate_chunk()
593 release_mem_region(devmem->pagemap.range.start, in dmirror_allocate_chunk()
594 range_len(&devmem->pagemap.range)); in dmirror_allocate_chunk()
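The chunk-allocation fragments above boil down to filling in a struct dev_pagemap and handing it to memremap_pages() so the kernel creates ZONE_DEVICE struct pages for the chunk. A hedged sketch of that setup; the helper name and the HMM_DMIRROR_MEMORY_DEVICE_* constants are assumptions, and error unwinding plus use of the returned mapping are abbreviated:

	static int dmirror_chunk_setup_sketch(struct dmirror_device *mdevice,
					      struct dmirror_chunk *devmem)
	{
		struct resource *res;
		void *ptr;

		switch (mdevice->zone_device_type) {
		case HMM_DMIRROR_MEMORY_DEVICE_PRIVATE:
			/* Reserve an unused physical range purely to get struct pages. */
			res = request_free_mem_region(&iomem_resource,
						      DEVMEM_CHUNK_SIZE, "hmm_dmirror");
			if (IS_ERR(res))
				return PTR_ERR(res);
			devmem->pagemap.range.start = res->start;
			devmem->pagemap.range.end = res->end;
			devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
			break;
		case HMM_DMIRROR_MEMORY_DEVICE_COHERENT:
			/* CPU-addressable SPM range supplied via the module parameters. */
			devmem->pagemap.range.start = spm_addr_dev0;	/* or spm_addr_dev1 */
			devmem->pagemap.range.end = spm_addr_dev0 +
						    DEVMEM_CHUNK_SIZE - 1;
			devmem->pagemap.type = MEMORY_DEVICE_COHERENT;
			break;
		default:
			return -EINVAL;
		}

		devmem->pagemap.nr_range = 1;
		devmem->pagemap.ops = &dmirror_devmem_ops;
		devmem->pagemap.owner = mdevice;	/* matched against dev_private_owner */

		/* Creates the ZONE_DEVICE struct pages for the range. */
		ptr = memremap_pages(&devmem->pagemap, numa_node_id());
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		return 0;
	}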
607 * For ZONE_DEVICE private type, this is a fake device so we allocate in dmirror_devmem_alloc_page()
608 * real system memory to store our device memory. in dmirror_devmem_alloc_page()
617 spin_lock(&mdevice->lock); in dmirror_devmem_alloc_page()
619 if (mdevice->free_pages) { in dmirror_devmem_alloc_page()
620 dpage = mdevice->free_pages; in dmirror_devmem_alloc_page()
621 mdevice->free_pages = dpage->zone_device_data; in dmirror_devmem_alloc_page()
622 mdevice->calloc++; in dmirror_devmem_alloc_page()
623 spin_unlock(&mdevice->lock); in dmirror_devmem_alloc_page()
625 spin_unlock(&mdevice->lock); in dmirror_devmem_alloc_page()
631 dpage->zone_device_data = rpage; in dmirror_devmem_alloc_page()
643 struct dmirror_device *mdevice = dmirror->mdevice; in dmirror_migrate_alloc_and_copy()
644 const unsigned long *src = args->src; in dmirror_migrate_alloc_and_copy()
645 unsigned long *dst = args->dst; in dmirror_migrate_alloc_and_copy()
646 unsigned long addr; in dmirror_migrate_alloc_and_copy() local
648 for (addr = args->start; addr < args->end; addr += PAGE_SIZE, in dmirror_migrate_alloc_and_copy()
659 * unallocated pte_none() or read-only zero page. in dmirror_migrate_alloc_and_copy()
663 "page already in device spage pfn: 0x%lx\n", in dmirror_migrate_alloc_and_copy()
678 * Normally, a device would use the page->zone_device_data to in dmirror_migrate_alloc_and_copy()
680 * the simulated device memory and that page holds the pointer in dmirror_migrate_alloc_and_copy()
683 rpage->zone_device_data = dmirror; in dmirror_migrate_alloc_and_copy()
689 (!spage && args->vma->vm_flags & VM_WRITE)) in dmirror_migrate_alloc_and_copy()
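The alloc-and-copy fragments above fill the destination half of a migrate_vma after migrate_vma_setup() has collected the source pages: allocate a device page for every migratable source pfn, copy (or zero) the data into its backing page, and publish the new pfn in args->dst. A hedged sketch of that loop; the function name is hypothetical, and BACKING_PAGE refers to the macro sketched earlier in this listing:

	static void dmirror_migrate_alloc_and_copy_sketch(struct migrate_vma *args,
							  struct dmirror *dmirror)
	{
		struct dmirror_device *mdevice = dmirror->mdevice;
		const unsigned long *src = args->src;
		unsigned long *dst = args->dst;
		unsigned long addr;

		for (addr = args->start; addr < args->end;
		     addr += PAGE_SIZE, src++, dst++) {
			struct page *spage = migrate_pfn_to_page(*src);
			struct page *dpage, *rpage;

			if (!(*src & MIGRATE_PFN_MIGRATE))
				continue;	/* core mm declined this page */

			dpage = dmirror_devmem_alloc_page(mdevice);
			if (!dpage)
				continue;

			/* Copy through the real page backing the fake device page. */
			rpage = BACKING_PAGE(dpage);
			if (spage)
				copy_highpage(rpage, spage);
			else
				clear_highpage(rpage);	/* pte_none() or zero page */

			/* Let the CPU fault handler find the owning mirror later. */
			rpage->zone_device_data = dmirror;

			*dst = migrate_pfn(page_to_pfn(dpage));
			if ((*src & MIGRATE_PFN_WRITE) ||
			    (!spage && args->vma->vm_flags & VM_WRITE))
				*dst |= MIGRATE_PFN_WRITE;
		}
	}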
702 entry = xa_load(&dmirror->pt, pfn); in dmirror_check_atomic()
704 return -EPERM; in dmirror_check_atomic()
710 static int dmirror_atomic_map(unsigned long addr, struct page *page, in dmirror_atomic_map() argument
715 /* Map the migrated pages into the device's page tables. */ in dmirror_atomic_map()
716 mutex_lock(&dmirror->mutex); in dmirror_atomic_map()
719 entry = xa_store(&dmirror->pt, addr >> PAGE_SHIFT, entry, GFP_ATOMIC); in dmirror_atomic_map()
721 mutex_unlock(&dmirror->mutex); in dmirror_atomic_map()
725 mutex_unlock(&dmirror->mutex); in dmirror_atomic_map()
732 unsigned long start = args->start; in dmirror_migrate_finalize_and_map()
733 unsigned long end = args->end; in dmirror_migrate_finalize_and_map()
734 const unsigned long *src = args->src; in dmirror_migrate_finalize_and_map()
735 const unsigned long *dst = args->dst; in dmirror_migrate_finalize_and_map()
738 /* Map the migrated pages into the device's page tables. */ in dmirror_migrate_finalize_and_map()
739 mutex_lock(&dmirror->mutex); in dmirror_migrate_finalize_and_map()
756 entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC); in dmirror_migrate_finalize_and_map()
758 mutex_unlock(&dmirror->mutex); in dmirror_migrate_finalize_and_map()
763 mutex_unlock(&dmirror->mutex); in dmirror_migrate_finalize_and_map()
770 unsigned long start, end, addr; in dmirror_exclusive() local
771 unsigned long size = cmd->npages << PAGE_SHIFT; in dmirror_exclusive()
772 struct mm_struct *mm = dmirror->notifier.mm; in dmirror_exclusive()
776 start = cmd->addr; in dmirror_exclusive()
779 return -EINVAL; in dmirror_exclusive()
783 return -EINVAL; in dmirror_exclusive()
786 for (addr = start; !ret && addr < end; addr += PAGE_SIZE) { in dmirror_exclusive()
790 page = make_device_exclusive(mm, addr, NULL, &folio); in dmirror_exclusive()
796 ret = dmirror_atomic_map(addr, page, dmirror); in dmirror_exclusive()
810 mutex_lock(&dmirror->mutex); in dmirror_exclusive()
812 mutex_unlock(&dmirror->mutex); in dmirror_exclusive()
814 if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr, in dmirror_exclusive()
816 ret = -EFAULT; in dmirror_exclusive()
819 cmd->cpages = bounce.cpages; in dmirror_exclusive()
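The exclusive-access fragments above grant the device atomic access one page at a time: make_device_exclusive() replaces the CPU mapping with a device-exclusive entry so that any CPU access faults and revokes it, and the driver then records the page in its mirror. A hedged sketch of that loop; the function name is hypothetical and the locked-folio/reference cleanup is an assumption about the API's contract:

	static int dmirror_exclusive_sketch(struct dmirror *dmirror,
					    unsigned long start, unsigned long end)
	{
		struct mm_struct *mm = dmirror->notifier.mm;
		unsigned long addr;
		int ret = 0;

		mmap_read_lock(mm);
		for (addr = start; !ret && addr < end; addr += PAGE_SIZE) {
			struct folio *folio;
			struct page *page;

			page = make_device_exclusive(mm, addr, NULL, &folio);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				break;
			}

			/* Record the exclusive page in the device page table. */
			ret = dmirror_atomic_map(addr, page, dmirror);

			/* Folio comes back locked and referenced; release both. */
			folio_unlock(folio);
			folio_put(folio);
		}
		mmap_read_unlock(mm);

		return ret;
	}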
827 const unsigned long *src = args->src; in dmirror_devmem_fault_alloc_and_copy()
828 unsigned long *dst = args->dst; in dmirror_devmem_fault_alloc_and_copy()
829 unsigned long start = args->start; in dmirror_devmem_fault_alloc_and_copy()
830 unsigned long end = args->end; in dmirror_devmem_fault_alloc_and_copy()
831 unsigned long addr; in dmirror_devmem_fault_alloc_and_copy() local
833 for (addr = start; addr < end; addr += PAGE_SIZE, in dmirror_devmem_fault_alloc_and_copy()
845 dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr); in dmirror_devmem_fault_alloc_and_copy()
852 xa_erase(&dmirror->pt, addr >> PAGE_SHIFT); in dmirror_devmem_fault_alloc_and_copy()
867 for (i = 0; i < migrate->npages; i++) { in dmirror_successful_migrated_pages()
868 if (migrate->src[i] & MIGRATE_PFN_VALID && in dmirror_successful_migrated_pages()
869 migrate->src[i] & MIGRATE_PFN_MIGRATE) in dmirror_successful_migrated_pages()
878 unsigned long start, end, addr; in dmirror_migrate_to_system() local
879 unsigned long size = cmd->npages << PAGE_SHIFT; in dmirror_migrate_to_system()
880 struct mm_struct *mm = dmirror->notifier.mm; in dmirror_migrate_to_system()
888 start = cmd->addr; in dmirror_migrate_to_system()
891 return -EINVAL; in dmirror_migrate_to_system()
895 return -EINVAL; in dmirror_migrate_to_system()
897 cmd->cpages = 0; in dmirror_migrate_to_system()
899 for (addr = start; addr < end; addr = next) { in dmirror_migrate_to_system()
900 vma = vma_lookup(mm, addr); in dmirror_migrate_to_system()
901 if (!vma || !(vma->vm_flags & VM_READ)) { in dmirror_migrate_to_system()
902 ret = -EINVAL; in dmirror_migrate_to_system()
905 next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT)); in dmirror_migrate_to_system()
906 if (next > vma->vm_end) in dmirror_migrate_to_system()
907 next = vma->vm_end; in dmirror_migrate_to_system()
912 args.start = addr; in dmirror_migrate_to_system()
914 args.pgmap_owner = dmirror->mdevice; in dmirror_migrate_to_system()
921 pr_debug("Migrating from device mem to sys mem\n"); in dmirror_migrate_to_system()
925 cmd->cpages += dmirror_successful_migrated_pages(&args); in dmirror_migrate_to_system()
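The fragments above only show the setup side of the device-to-system migration; the full migrate_vma sequence behind them is setup, alloc-and-copy, pages, finalize. A hedged sketch of that sequence for one per-VMA window (vma, src_pfns, dst_pfns, addr, next, and cmd are taken from the surrounding function; the MIGRATE_VMA_SELECT_* flags for the device-to-system direction are assumptions):

	struct migrate_vma args = {
		.vma		= vma,
		.src		= src_pfns,
		.dst		= dst_pfns,
		.start		= addr,
		.end		= next,
		.pgmap_owner	= dmirror->mdevice,
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
				  MIGRATE_VMA_SELECT_DEVICE_COHERENT,
	};
	int ret;

	ret = migrate_vma_setup(&args);		/* collect and isolate source pages */
	if (ret)
		return ret;

	/* Allocate system pages and copy the device data into them. */
	dmirror_devmem_fault_alloc_and_copy(&args, dmirror);
	migrate_vma_pages(&args);		/* install the destination pages */
	cmd->cpages += dmirror_successful_migrated_pages(&args);
	migrate_vma_finalize(&args);		/* restore whatever did not move */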
938 unsigned long start, end, addr; in dmirror_migrate_to_device() local
939 unsigned long size = cmd->npages << PAGE_SHIFT; in dmirror_migrate_to_device()
940 struct mm_struct *mm = dmirror->notifier.mm; in dmirror_migrate_to_device()
949 start = cmd->addr; in dmirror_migrate_to_device()
952 return -EINVAL; in dmirror_migrate_to_device()
956 return -EINVAL; in dmirror_migrate_to_device()
959 for (addr = start; addr < end; addr = next) { in dmirror_migrate_to_device()
960 vma = vma_lookup(mm, addr); in dmirror_migrate_to_device()
961 if (!vma || !(vma->vm_flags & VM_READ)) { in dmirror_migrate_to_device()
962 ret = -EINVAL; in dmirror_migrate_to_device()
965 next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT)); in dmirror_migrate_to_device()
966 if (next > vma->vm_end) in dmirror_migrate_to_device()
967 next = vma->vm_end; in dmirror_migrate_to_device()
972 args.start = addr; in dmirror_migrate_to_device()
974 args.pgmap_owner = dmirror->mdevice; in dmirror_migrate_to_device()
980 pr_debug("Migrating from sys mem to device mem\n"); in dmirror_migrate_to_device()
991 * Only for pages in device zone in dmirror_migrate_to_device()
996 mutex_lock(&dmirror->mutex); in dmirror_migrate_to_device()
998 mutex_unlock(&dmirror->mutex); in dmirror_migrate_to_device()
1000 if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr, in dmirror_migrate_to_device()
1002 ret = -EFAULT; in dmirror_migrate_to_device()
1004 cmd->cpages = bounce.cpages; in dmirror_migrate_to_device()
1030 /* Is the page migrated to this device or some other? */ in dmirror_mkentry()
1031 if (dmirror->mdevice == dmirror_page_to_device(page)) in dmirror_mkentry()
1036 /* Is the page migrated to this device or some other? */ in dmirror_mkentry()
1037 if (dmirror->mdevice == dmirror_page_to_device(page)) in dmirror_mkentry()
1061 struct dmirror *dmirror = dmi->dmirror; in dmirror_snapshot_invalidate()
1064 mutex_lock(&dmirror->mutex); in dmirror_snapshot_invalidate()
1065 else if (!mutex_trylock(&dmirror->mutex)) in dmirror_snapshot_invalidate()
1074 mutex_unlock(&dmirror->mutex); in dmirror_snapshot_invalidate()
1086 struct mm_struct *mm = dmirror->notifier.mm; in dmirror_range_snapshot()
1095  range->notifier = &notifier.notifier; in dmirror_range_snapshot()
1097 ret = mmu_interval_notifier_insert(range->notifier, mm, in dmirror_range_snapshot()
1098 range->start, range->end - range->start, in dmirror_range_snapshot()
1105 ret = -EBUSY; in dmirror_range_snapshot()
1109 range->notifier_seq = mmu_interval_read_begin(range->notifier); in dmirror_range_snapshot()
1115 if (ret == -EBUSY) in dmirror_range_snapshot()
1120 mutex_lock(&dmirror->mutex); in dmirror_range_snapshot()
1121 if (mmu_interval_read_retry(range->notifier, in dmirror_range_snapshot()
1122 range->notifier_seq)) { in dmirror_range_snapshot()
1123 mutex_unlock(&dmirror->mutex); in dmirror_range_snapshot()
1129 n = (range->end - range->start) >> PAGE_SHIFT; in dmirror_range_snapshot()
1131 dmirror_mkentry(dmirror, range, perm + i, range->hmm_pfns[i]); in dmirror_range_snapshot()
1133 mutex_unlock(&dmirror->mutex); in dmirror_range_snapshot()
1135 mmu_interval_notifier_remove(range->notifier); in dmirror_range_snapshot()
1142 struct mm_struct *mm = dmirror->notifier.mm; in dmirror_snapshot()
1144 unsigned long size = cmd->npages << PAGE_SHIFT; in dmirror_snapshot()
1145 unsigned long addr; in dmirror_snapshot() local
1152 .dev_private_owner = dmirror->mdevice, in dmirror_snapshot()
1156 start = cmd->addr; in dmirror_snapshot()
1159 return -EINVAL; in dmirror_snapshot()
1163 return -EINVAL; in dmirror_snapshot()
1169 uptr = u64_to_user_ptr(cmd->ptr); in dmirror_snapshot()
1170 for (addr = start; addr < end; addr = next) { in dmirror_snapshot()
1173 next = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end); in dmirror_snapshot()
1174 range.start = addr; in dmirror_snapshot()
1181 n = (range.end - range.start) >> PAGE_SHIFT; in dmirror_snapshot()
1183 ret = -EFAULT; in dmirror_snapshot()
1187 cmd->cpages += n; in dmirror_snapshot()
1197 unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT; in dmirror_device_evict_chunk()
1198 unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT; in dmirror_device_evict_chunk()
1199 unsigned long npages = end_pfn - start_pfn + 1; in dmirror_device_evict_chunk()
1232 /* Removes free pages from the free list so they can't be re-allocated */
1235 struct dmirror_device *mdevice = devmem->mdevice; in dmirror_remove_free_pages()
1238 for (page = mdevice->free_pages; page; page = page->zone_device_data) in dmirror_remove_free_pages()
1240 mdevice->free_pages = page->zone_device_data; in dmirror_remove_free_pages()
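The free-page fragments here and around the allocator implement a simple LIFO free list threaded through page->zone_device_data. A hedged sketch of the push/pop convention under the device spinlock; both helper names are hypothetical:

	/* Sketch: LIFO free list of ZONE_DEVICE pages, linked via zone_device_data. */
	static void dmirror_free_page_push(struct dmirror_device *mdevice,
					   struct page *page)
	{
		spin_lock(&mdevice->lock);
		page->zone_device_data = mdevice->free_pages;	/* next pointer */
		mdevice->free_pages = page;
		mdevice->cfree++;
		spin_unlock(&mdevice->lock);
	}

	static struct page *dmirror_free_page_pop(struct dmirror_device *mdevice)
	{
		struct page *page;

		spin_lock(&mdevice->lock);
		page = mdevice->free_pages;
		if (page) {
			mdevice->free_pages = page->zone_device_data;
			mdevice->calloc++;
		}
		spin_unlock(&mdevice->lock);
		return page;
	}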
1247 mutex_lock(&mdevice->devmem_lock); in dmirror_device_remove_chunks()
1248 if (mdevice->devmem_chunks) { in dmirror_device_remove_chunks()
1249 for (i = 0; i < mdevice->devmem_count; i++) { in dmirror_device_remove_chunks()
1251 mdevice->devmem_chunks[i]; in dmirror_device_remove_chunks()
1253 spin_lock(&mdevice->lock); in dmirror_device_remove_chunks()
1254 devmem->remove = true; in dmirror_device_remove_chunks()
1256 spin_unlock(&mdevice->lock); in dmirror_device_remove_chunks()
1259 memunmap_pages(&devmem->pagemap); in dmirror_device_remove_chunks()
1260 if (devmem->pagemap.type == MEMORY_DEVICE_PRIVATE) in dmirror_device_remove_chunks()
1261 release_mem_region(devmem->pagemap.range.start, in dmirror_device_remove_chunks()
1262 range_len(&devmem->pagemap.range)); in dmirror_device_remove_chunks()
1265 mdevice->devmem_count = 0; in dmirror_device_remove_chunks()
1266 mdevice->devmem_capacity = 0; in dmirror_device_remove_chunks()
1267 mdevice->free_pages = NULL; in dmirror_device_remove_chunks()
1268 kfree(mdevice->devmem_chunks); in dmirror_device_remove_chunks()
1269 mdevice->devmem_chunks = NULL; in dmirror_device_remove_chunks()
1271 mutex_unlock(&mdevice->devmem_lock); in dmirror_device_remove_chunks()
1283 dmirror = filp->private_data; in dmirror_fops_unlocked_ioctl()
1285 return -EINVAL; in dmirror_fops_unlocked_ioctl()
1288 return -EFAULT; in dmirror_fops_unlocked_ioctl()
1290 if (cmd.addr & ~PAGE_MASK) in dmirror_fops_unlocked_ioctl()
1291 return -EINVAL; in dmirror_fops_unlocked_ioctl()
1292 if (cmd.addr >= (cmd.addr + (cmd.npages << PAGE_SHIFT))) in dmirror_fops_unlocked_ioctl()
1293 return -EINVAL; in dmirror_fops_unlocked_ioctl()
1320 ret = dmirror_check_atomic(dmirror, cmd.addr, in dmirror_fops_unlocked_ioctl()
1321 cmd.addr + (cmd.npages << PAGE_SHIFT)); in dmirror_fops_unlocked_ioctl()
1329 dmirror_device_remove_chunks(dmirror->mdevice); in dmirror_fops_unlocked_ioctl()
1334 return -EINVAL; in dmirror_fops_unlocked_ioctl()
1340 return -EFAULT; in dmirror_fops_unlocked_ioctl()
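The ioctl fragments above validate a struct hmm_dmirror_cmd copied from user space (page-aligned addr, non-wrapping range) before dispatching the command. As a hedged userspace sketch of driving the read path; the HMM_DMIRROR_READ macro, the struct layout, and the /dev/hmm_dmirror0 node are assumptions based on the test's uapi header and selftest setup:

	/* Userspace sketch: mirror a buffer through the test device and read it back. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include "test_hmm_uapi.h"	/* struct hmm_dmirror_cmd, HMM_DMIRROR_READ */

	int main(void)
	{
		size_t size = 16 * 4096;
		unsigned char *buf = aligned_alloc(4096, size);	/* page aligned */
		unsigned char *out = malloc(size);
		struct hmm_dmirror_cmd cmd = { 0 };
		int fd;

		if (!buf || !out)
			return 1;
		memset(buf, 0xab, size);

		/* Opening the device registers a mirror for this process's mm. */
		fd = open("/dev/hmm_dmirror0", O_RDWR);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		cmd.addr = (unsigned long)buf;	/* must be page aligned */
		cmd.ptr = (unsigned long)out;	/* driver copies the data here */
		cmd.npages = size / 4096;

		/* The driver faults the range with hmm_range_fault() and copies it. */
		if (ioctl(fd, HMM_DMIRROR_READ, &cmd) < 0) {
			perror("HMM_DMIRROR_READ");
			return 1;
		}
		printf("mirrored %llu pages, %llu driver faults\n",
		       (unsigned long long)cmd.cpages, (unsigned long long)cmd.faults);
		close(fd);
		return 0;
	}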
1347 unsigned long addr; in dmirror_fops_mmap() local
1349 for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) { in dmirror_fops_mmap()
1355 return -ENOMEM; in dmirror_fops_mmap()
1357 ret = vm_insert_page(vma, addr, page); in dmirror_fops_mmap()
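The mmap fragments above back every page of the caller's VMA with a freshly allocated, zeroed page via vm_insert_page(). A hedged sketch of that loop, including the reference handling (vm_insert_page takes its own reference, so the allocation reference is dropped afterwards); the function name is hypothetical:

	static int dmirror_fops_mmap_sketch(struct file *file,
					    struct vm_area_struct *vma)
	{
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
			struct page *page;
			int ret;

			page = alloc_page(GFP_KERNEL | __GFP_ZERO);
			if (!page)
				return -ENOMEM;

			ret = vm_insert_page(vma, addr, page);
			if (ret) {
				__free_page(page);
				return ret;
			}
			put_page(page);	/* the mapping now holds its own reference */
		}

		return 0;
	}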
1386 spin_lock(&mdevice->lock); in dmirror_devmem_free()
1389 if (!dmirror_page_to_chunk(page)->remove) { in dmirror_devmem_free()
1390 mdevice->cfree++; in dmirror_devmem_free()
1391 page->zone_device_data = mdevice->free_pages; in dmirror_devmem_free()
1392 mdevice->free_pages = page; in dmirror_devmem_free()
1394 spin_unlock(&mdevice->lock); in dmirror_devmem_free()
1407 * Normally, a device would use the page->zone_device_data to point to in dmirror_devmem_fault()
1409 * device memory and that page holds the pointer to the mirror. in dmirror_devmem_fault()
1411 rpage = vmf->page->zone_device_data; in dmirror_devmem_fault()
1412 dmirror = rpage->zone_device_data; in dmirror_devmem_fault()
1415 args.vma = vmf->vma; in dmirror_devmem_fault()
1416 args.start = vmf->address; in dmirror_devmem_fault()
1420 args.pgmap_owner = dmirror->mdevice; in dmirror_devmem_fault()
1422 args.fault_page = vmf->page; in dmirror_devmem_fault()
1432 * No device finalize step is needed since in dmirror_devmem_fault()
1434 * invalidated the device page table. in dmirror_devmem_fault()
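The fault fragments above are reached through the pagemap's migrate_to_ram callback, and freed device pages flow back onto the free list through page_free. A hedged sketch of how those ZONE_DEVICE callbacks are wired up; the callback function names are taken from this listing, the ops struct name from the pagemap setup fragments:

	static const struct dev_pagemap_ops dmirror_devmem_ops = {
		.page_free	= dmirror_devmem_free,	/* return page to free list */
		.migrate_to_ram	= dmirror_devmem_fault,	/* CPU touched a device page */
	};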
1451 mutex_init(&mdevice->devmem_lock); in dmirror_device_init()
1452 spin_lock_init(&mdevice->lock); in dmirror_device_init()
1454 cdev_init(&mdevice->cdevice, &dmirror_fops); in dmirror_device_init()
1455 mdevice->cdevice.owner = THIS_MODULE; in dmirror_device_init()
1456 device_initialize(&mdevice->device); in dmirror_device_init()
1457 mdevice->device.devt = dev; in dmirror_device_init()
1459 ret = dev_set_name(&mdevice->device, "hmm_dmirror%u", id); in dmirror_device_init()
1463 ret = cdev_device_add(&mdevice->cdevice, &mdevice->device); in dmirror_device_init()
1474 cdev_device_del(&mdevice->cdevice, &mdevice->device); in dmirror_device_remove()
1509 while (--id >= 0) in hmm_dmirror_init()