Lines Matching +full:iommu +full:- +full:ctx

1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
9 #include <linux/dma-mapping.h>
28 * for GPU-accelerated applications by allowing memory sharing and
33 * - Notifiers:
36 * recommendation of 512M or larger. They maintain a Red-Black tree and a
38 * tracked within a GPU SVM Red-Black tree and list and are dynamically
41 * - Ranges:
48 * event. As mentioned above, ranges are tracked in a notifier's Red-Black
51 * - Operations:
52 * Define the interface for driver-specific GPU SVM operations such as
55 * - Device Memory Allocations:
59 * - Device Memory Operations:
60 * Define the interface for driver-specific device memory operations
66 * driver-specific virtual functions (vfuncs). This infrastructure is sufficient
72 * - GPU page fault handler:
76 * - Garbage collector:
81 * - Notifier callback:
96 * This lock corresponds to the ``driver->update`` lock mentioned in
98 * global lock to a per-notifier lock if finer-grained locking is deemed
119 * complicated. Given that partial unmappings are rare and driver-defined range
137 * potentially required driver locking (e.g., DMA-resv locks).
141 * .. code-block:: c
153 * err = -EAGAIN;
162 * struct drm_gpusvm_ctx ctx = {};
172 * &ctx);
180 * gpuva_start, gpuva_end, gpusvm->mm,
181 * ctx->timeslice_ms);
186 * err = drm_gpusvm_range_get_pages(gpusvm, range, &ctx);
187 * if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) { // CPU mappings changed
188 * if (err == -EOPNOTSUPP)
196 * if (err == -EAGAIN) // CPU mappings changed
206 * .. code-block:: c
214 * if (range->flags.partial_unmap)
231 * .. code-block:: c
237 * struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
240 * driver_invalidate_device_pages(gpusvm, mmu_range->start, mmu_range->end);
242 * drm_gpusvm_for_each_range(range, notifier, mmu_range->start,
243 * mmu_range->end) {
244 * drm_gpusvm_range_unmap_pages(gpusvm, range, &ctx);
246 * if (mmu_range->event != MMU_NOTIFY_UNMAP)
256 * npages_in_range() - Calculate the number of pages in a given range
270 return (end - start) >> PAGE_SHIFT; in npages_in_range()
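For illustration, the arithmetic above with concrete (hypothetical) addresses on a 4K-page configuration:

	/* Illustrative only: 0x1000..0x5000 covers four 4K pages. */
	unsigned long start = 0x1000, end = 0x5000;
	unsigned long npages = (end - start) >> PAGE_SHIFT;	/* 0x4000 >> 12 == 4 */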
274 * drm_gpusvm_notifier_find() - Find GPU SVM notifier from GPU SVM
287 itree = interval_tree_iter_first(&gpusvm->root, start, end - 1); in drm_gpusvm_notifier_find()
297 * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
310 itree = interval_tree_iter_first(&notifier->root, start, end - 1); in drm_gpusvm_range_find()
320 * drm_gpusvm_notifier_invalidate() - Invalidate a GPU SVM notifier.
327 * gpusvm->notifier_lock.
338 struct drm_gpusvm *gpusvm = notifier->gpusvm; in drm_gpusvm_notifier_invalidate()
343 down_write(&gpusvm->notifier_lock); in drm_gpusvm_notifier_invalidate()
345 gpusvm->ops->invalidate(gpusvm, notifier, mmu_range); in drm_gpusvm_notifier_invalidate()
346 up_write(&gpusvm->notifier_lock); in drm_gpusvm_notifier_invalidate()
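drm_gpusvm_notifier_invalidate() takes the notifier lock in write mode and then dispatches to the driver's invalidate vfunc. A minimal sketch of such a vfunc, mirroring the notifier-callback example in the header comment above; driver_invalidate_device_pages() and the garbage-collector note are hypothetical driver pieces:

static void driver_svm_invalidate(struct drm_gpusvm *gpusvm,
				  struct drm_gpusvm_notifier *notifier,
				  const struct mmu_notifier_range *mmu_range)
{
	struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
	struct drm_gpusvm_range *range = NULL;

	/* Hypothetical helper: zap GPU page tables for the invalidated CPU range. */
	driver_invalidate_device_pages(gpusvm, mmu_range->start, mmu_range->end);

	drm_gpusvm_for_each_range(range, notifier, mmu_range->start,
				  mmu_range->end) {
		/* Required for each range under the notifier (IOMMU security model). */
		drm_gpusvm_range_unmap_pages(gpusvm, range, &ctx);

		if (mmu_range->event != MMU_NOTIFY_UNMAP)
			continue;

		drm_gpusvm_range_set_unmapped(range, mmu_range);
		/* Typically the range is also queued for the driver's garbage collector. */
	}
}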
352 * drm_gpusvm_notifier_ops - MMU interval notifier operations for GPU SVM
359 * drm_gpusvm_init() - Initialize the GPU SVM.
392 if (!ops->invalidate || !num_chunks) in drm_gpusvm_init()
393 return -EINVAL; in drm_gpusvm_init()
398 return -EINVAL; in drm_gpusvm_init()
401 gpusvm->name = name; in drm_gpusvm_init()
402 gpusvm->drm = drm; in drm_gpusvm_init()
403 gpusvm->mm = mm; in drm_gpusvm_init()
404 gpusvm->mm_start = mm_start; in drm_gpusvm_init()
405 gpusvm->mm_range = mm_range; in drm_gpusvm_init()
406 gpusvm->notifier_size = notifier_size; in drm_gpusvm_init()
407 gpusvm->ops = ops; in drm_gpusvm_init()
408 gpusvm->chunk_sizes = chunk_sizes; in drm_gpusvm_init()
409 gpusvm->num_chunks = num_chunks; in drm_gpusvm_init()
411 gpusvm->root = RB_ROOT_CACHED; in drm_gpusvm_init()
412 INIT_LIST_HEAD(&gpusvm->notifier_list); in drm_gpusvm_init()
414 init_rwsem(&gpusvm->notifier_lock); in drm_gpusvm_init()
417 might_lock(&gpusvm->notifier_lock); in drm_gpusvm_init()
421 gpusvm->lock_dep_map = NULL; in drm_gpusvm_init()
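Based on the fields initialized above, a minimal sketch of how a driver might set up a GPU SVM instance. The exact parameter order of drm_gpusvm_init() is assumed from the assignments shown here, and driver_svm_ops, driver_vm, and the chunk sizes are illustrative:

static const struct drm_gpusvm_ops driver_svm_ops = {
	.invalidate = driver_svm_invalidate,	/* see the invalidate sketch above */
};

/* Illustrative chunk sizes: try 2M mappings first, fall back to 64K, then 4K. */
static const unsigned long driver_svm_chunk_sizes[] = { SZ_2M, SZ_64K, SZ_4K };

int driver_svm_init(struct driver_vm *vm)	/* hypothetical driver type */
{
	return drm_gpusvm_init(&vm->svm, "driver-svm", vm->drm, current->mm,
			       0, TASK_SIZE,	/* mm_start, mm_range */
			       SZ_512M,		/* notifier_size */
			       &driver_svm_ops,
			       driver_svm_chunk_sizes,
			       ARRAY_SIZE(driver_svm_chunk_sizes));
}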
429 * to_drm_gpusvm_notifier() - retrieve the container struct for a given rbtree node
440 * drm_gpusvm_notifier_insert() - Insert GPU SVM notifier
452 interval_tree_insert(&notifier->itree, &gpusvm->root); in drm_gpusvm_notifier_insert()
454 node = rb_prev(&notifier->itree.rb); in drm_gpusvm_notifier_insert()
456 head = &(to_drm_gpusvm_notifier(node))->entry; in drm_gpusvm_notifier_insert()
458 head = &gpusvm->notifier_list; in drm_gpusvm_notifier_insert()
460 list_add(&notifier->entry, head); in drm_gpusvm_notifier_insert()
464 * drm_gpusvm_notifier_remove() - Remove GPU SVM notifier
473 interval_tree_remove(&notifier->itree, &gpusvm->root); in drm_gpusvm_notifier_remove()
474 list_del(&notifier->entry); in drm_gpusvm_notifier_remove()
478 * drm_gpusvm_fini() - Finalize the GPU SVM.
494 mmu_interval_notifier_remove(&notifier->notifier); in drm_gpusvm_fini()
495 notifier->flags.removed = true; in drm_gpusvm_fini()
502 if (gpusvm->mm) in drm_gpusvm_fini()
503 mmdrop(gpusvm->mm); in drm_gpusvm_fini()
504 WARN_ON(!RB_EMPTY_ROOT(&gpusvm->root.rb_root)); in drm_gpusvm_fini()
509 * drm_gpusvm_notifier_alloc() - Allocate GPU SVM notifier
522 if (gpusvm->ops->notifier_alloc) in drm_gpusvm_notifier_alloc()
523 notifier = gpusvm->ops->notifier_alloc(); in drm_gpusvm_notifier_alloc()
528 return ERR_PTR(-ENOMEM); in drm_gpusvm_notifier_alloc()
530 notifier->gpusvm = gpusvm; in drm_gpusvm_notifier_alloc()
531 notifier->itree.start = ALIGN_DOWN(fault_addr, gpusvm->notifier_size); in drm_gpusvm_notifier_alloc()
532 notifier->itree.last = ALIGN(fault_addr + 1, gpusvm->notifier_size) - 1; in drm_gpusvm_notifier_alloc()
533 INIT_LIST_HEAD(&notifier->entry); in drm_gpusvm_notifier_alloc()
534 notifier->root = RB_ROOT_CACHED; in drm_gpusvm_notifier_alloc()
535 INIT_LIST_HEAD(&notifier->range_list); in drm_gpusvm_notifier_alloc()
541 * drm_gpusvm_notifier_free() - Free GPU SVM notifier
550 WARN_ON(!RB_EMPTY_ROOT(&notifier->root.rb_root)); in drm_gpusvm_notifier_free()
552 if (gpusvm->ops->notifier_free) in drm_gpusvm_notifier_free()
553 gpusvm->ops->notifier_free(notifier); in drm_gpusvm_notifier_free()
559 * to_drm_gpusvm_range() - retrieve the container struct for a given rbtree node
570 * drm_gpusvm_range_insert() - Insert GPU SVM range
582 drm_gpusvm_notifier_lock(notifier->gpusvm); in drm_gpusvm_range_insert()
583 interval_tree_insert(&range->itree, &notifier->root); in drm_gpusvm_range_insert()
585 node = rb_prev(&range->itree.rb); in drm_gpusvm_range_insert()
587 head = &(to_drm_gpusvm_range(node))->entry; in drm_gpusvm_range_insert()
589 head = &notifier->range_list; in drm_gpusvm_range_insert()
591 list_add(&range->entry, head); in drm_gpusvm_range_insert()
592 drm_gpusvm_notifier_unlock(notifier->gpusvm); in drm_gpusvm_range_insert()
596 * __drm_gpusvm_range_remove() - Remove GPU SVM range
605 interval_tree_remove(&range->itree, &notifier->root); in __drm_gpusvm_range_remove()
606 list_del(&range->entry); in __drm_gpusvm_range_remove()
610 * drm_gpusvm_range_alloc() - Allocate GPU SVM range
629 if (gpusvm->ops->range_alloc) in drm_gpusvm_range_alloc()
630 range = gpusvm->ops->range_alloc(gpusvm); in drm_gpusvm_range_alloc()
635 return ERR_PTR(-ENOMEM); in drm_gpusvm_range_alloc()
637 kref_init(&range->refcount); in drm_gpusvm_range_alloc()
638 range->gpusvm = gpusvm; in drm_gpusvm_range_alloc()
639 range->notifier = notifier; in drm_gpusvm_range_alloc()
640 range->itree.start = ALIGN_DOWN(fault_addr, chunk_size); in drm_gpusvm_range_alloc()
641 range->itree.last = ALIGN(fault_addr + 1, chunk_size) - 1; in drm_gpusvm_range_alloc()
642 INIT_LIST_HEAD(&range->entry); in drm_gpusvm_range_alloc()
643 range->pages.notifier_seq = LONG_MAX; in drm_gpusvm_range_alloc()
644 range->pages.flags.migrate_devmem = migrate_devmem ? 1 : 0; in drm_gpusvm_range_alloc()
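The same alignment pattern is used for notifiers (with notifier_size) and for ranges (with the selected chunk_size). A worked example with illustrative values:

	/* Illustrative only: a 2M chunk around a fault address. */
	unsigned long fault_addr = 0x7f1234567000UL;
	unsigned long chunk_size = SZ_2M;

	/* start = 0x7f1234400000, last = 0x7f12345fffff: one aligned 2M chunk. */
	unsigned long start = ALIGN_DOWN(fault_addr, chunk_size);
	unsigned long last = ALIGN(fault_addr + 1, chunk_size) - 1;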
650 * drm_gpusvm_hmm_pfn_to_order() - Get the largest CPU mapping order.
670 size -= (hmm_pfn & ~HMM_PFN_FLAGS) & (size - 1); in drm_gpusvm_hmm_pfn_to_order()
673 size -= (hmm_pfn_index - npages); in drm_gpusvm_hmm_pfn_to_order()
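A worked example of the two clamps above, assuming the usual structure of this helper (size starts as 1 << hmm_pfn_to_map_order(hmm_pfn) and the result is ilog2(size)); the numbers are illustrative:

	/*
	 * Illustrative only: a 2M THP maps 512 pages (CPU order 9).
	 *  - If the faulting pfn sits 3 pages into the THP, the first clamp
	 *    gives size = 512 - 3 = 509, so the returned order is ilog2(509) = 8.
	 *  - If only 200 of the requested npages remain, the second clamp
	 *    gives size = 200 and the returned order is ilog2(200) = 7.
	 */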
679 * drm_gpusvm_check_pages() - Check pages
698 .notifier = &notifier->notifier, in drm_gpusvm_check_pages()
709 mmap_assert_locked(gpusvm->mm); in drm_gpusvm_check_pages()
715 hmm_range.notifier_seq = mmu_interval_read_begin(&notifier->notifier); in drm_gpusvm_check_pages()
720 if (err == -EBUSY) { in drm_gpusvm_check_pages()
725 mmu_interval_read_begin(&notifier->notifier); in drm_gpusvm_check_pages()
735 err = -EFAULT; in drm_gpusvm_check_pages()
747 * drm_gpusvm_range_chunk_size() - Determine chunk size for GPU SVM range
777 for (; i < gpusvm->num_chunks; ++i) { in drm_gpusvm_range_chunk_size()
778 start = ALIGN_DOWN(fault_addr, gpusvm->chunk_sizes[i]); in drm_gpusvm_range_chunk_size()
779 end = ALIGN(fault_addr + 1, gpusvm->chunk_sizes[i]); in drm_gpusvm_range_chunk_size()
781 if (start >= vas->vm_start && end <= vas->vm_end && in drm_gpusvm_range_chunk_size()
788 if (i == gpusvm->num_chunks) in drm_gpusvm_range_chunk_size()
795 if (end - start != SZ_4K) { in drm_gpusvm_range_chunk_size()
806 * this check, or prefault, on BMG 'xe_exec_system_allocator --r in drm_gpusvm_range_chunk_size()
807 * process-many-malloc' fails. In the failure case, each process in drm_gpusvm_range_chunk_size()
814 * problem goes away if 'xe_exec_system_allocator --r in drm_gpusvm_range_chunk_size()
815 * process-many-malloc' mallocs at least 64k at a time. in drm_gpusvm_range_chunk_size()
817 if (end - start <= check_pages_threshold && in drm_gpusvm_range_chunk_size()
824 return end - start; in drm_gpusvm_range_chunk_size()
829 * drm_gpusvm_driver_lock_held() - Assert GPU SVM driver lock is held
836 if ((gpusvm)->lock_dep_map) in drm_gpusvm_driver_lock_held()
837 lockdep_assert(lock_is_held_type((gpusvm)->lock_dep_map, 0)); in drm_gpusvm_driver_lock_held()
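For this assert to do anything, the driver has to register its update lock with GPU SVM. A minimal sketch, assuming the drm_gpusvm_driver_set_lock() helper exposed alongside this code (treat the helper name as an assumption; the underlying effect is populating gpusvm->lock_dep_map):

	/* Illustrative only: vm and vm->svm_update_lock are hypothetical driver state. */
	mutex_init(&vm->svm_update_lock);
	drm_gpusvm_driver_set_lock(&vm->svm, &vm->svm_update_lock);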
846 * drm_gpusvm_find_vma_start() - Find start address for first VMA in range
859 struct mm_struct *mm = gpusvm->mm; in drm_gpusvm_find_vma_start()
870 addr = vma->vm_start; in drm_gpusvm_find_vma_start()
880 * drm_gpusvm_range_find_or_insert() - Find or insert GPU SVM range
885 * @ctx: GPU SVM context
897 const struct drm_gpusvm_ctx *ctx) in drm_gpusvm_range_find_or_insert() argument
901 struct mm_struct *mm = gpusvm->mm; in drm_gpusvm_range_find_or_insert()
910 if (fault_addr < gpusvm->mm_start || in drm_gpusvm_range_find_or_insert()
911 fault_addr > gpusvm->mm_start + gpusvm->mm_range) in drm_gpusvm_range_find_or_insert()
912 return ERR_PTR(-EINVAL); in drm_gpusvm_range_find_or_insert()
915 return ERR_PTR(-EFAULT); in drm_gpusvm_range_find_or_insert()
925 err = mmu_interval_notifier_insert(&notifier->notifier, in drm_gpusvm_range_find_or_insert()
938 err = -ENOENT; in drm_gpusvm_range_find_or_insert()
942 if (!ctx->read_only && !(vas->vm_flags & VM_WRITE)) { in drm_gpusvm_range_find_or_insert()
943 err = -EPERM; in drm_gpusvm_range_find_or_insert()
951 * XXX: Short-circuiting migration based on migrate_vma_* current in drm_gpusvm_range_find_or_insert()
955 migrate_devmem = ctx->devmem_possible && in drm_gpusvm_range_find_or_insert()
961 ctx->check_pages_threshold, in drm_gpusvm_range_find_or_insert()
962 ctx->device_private_page_owner); in drm_gpusvm_range_find_or_insert()
964 err = -EINVAL; in drm_gpusvm_range_find_or_insert()
988 mmu_interval_notifier_remove(&notifier->notifier); in drm_gpusvm_range_find_or_insert()
999 * __drm_gpusvm_unmap_pages() - Unmap pages associated with GPU SVM pages (internal)
1011 struct drm_pagemap *dpagemap = svm_pages->dpagemap; in __drm_gpusvm_unmap_pages()
1012 struct device *dev = gpusvm->drm->dev; in __drm_gpusvm_unmap_pages()
1015 lockdep_assert_held(&gpusvm->notifier_lock); in __drm_gpusvm_unmap_pages()
1017 if (svm_pages->flags.has_dma_mapping) { in __drm_gpusvm_unmap_pages()
1019 .__flags = svm_pages->flags.__flags, in __drm_gpusvm_unmap_pages()
1023 struct drm_pagemap_addr *addr = &svm_pages->dma_addr[j]; in __drm_gpusvm_unmap_pages()
1025 if (addr->proto == DRM_INTERCONNECT_SYSTEM) in __drm_gpusvm_unmap_pages()
1027 addr->addr, in __drm_gpusvm_unmap_pages()
1028 PAGE_SIZE << addr->order, in __drm_gpusvm_unmap_pages()
1029 addr->dir); in __drm_gpusvm_unmap_pages()
1030 else if (dpagemap && dpagemap->ops->device_unmap) in __drm_gpusvm_unmap_pages()
1031 dpagemap->ops->device_unmap(dpagemap, in __drm_gpusvm_unmap_pages()
1033 i += 1 << addr->order; in __drm_gpusvm_unmap_pages()
1039 WRITE_ONCE(svm_pages->flags.__flags, flags.__flags); in __drm_gpusvm_unmap_pages()
1041 svm_pages->dpagemap = NULL; in __drm_gpusvm_unmap_pages()
1046 * __drm_gpusvm_free_pages() - Free dma array associated with GPU SVM pages
1055 lockdep_assert_held(&gpusvm->notifier_lock); in __drm_gpusvm_free_pages()
1057 if (svm_pages->dma_addr) { in __drm_gpusvm_free_pages()
1058 kvfree(svm_pages->dma_addr); in __drm_gpusvm_free_pages()
1059 svm_pages->dma_addr = NULL; in __drm_gpusvm_free_pages()
1064 * drm_gpusvm_free_pages() - Free dma-mapping associated with GPU SVM pages
1085 * drm_gpusvm_range_remove() - Remove GPU SVM range
1109 __drm_gpusvm_unmap_pages(gpusvm, &range->pages, npages); in drm_gpusvm_range_remove()
1110 __drm_gpusvm_free_pages(gpusvm, &range->pages); in drm_gpusvm_range_remove()
1116 if (RB_EMPTY_ROOT(&notifier->root.rb_root)) { in drm_gpusvm_range_remove()
1117 if (!notifier->flags.removed) in drm_gpusvm_range_remove()
1118 mmu_interval_notifier_remove(&notifier->notifier); in drm_gpusvm_range_remove()
1126 * drm_gpusvm_range_get() - Get a reference to GPU SVM range
1136 kref_get(&range->refcount); in drm_gpusvm_range_get()
1143 * drm_gpusvm_range_destroy() - Destroy GPU SVM range
1147 * reaches zero. If a custom range-free function is provided, it is invoked to
1154 struct drm_gpusvm *gpusvm = range->gpusvm; in drm_gpusvm_range_destroy()
1156 if (gpusvm->ops->range_free) in drm_gpusvm_range_destroy()
1157 gpusvm->ops->range_free(range); in drm_gpusvm_range_destroy()
1163 * drm_gpusvm_range_put() - Put a reference to GPU SVM range
1171 kref_put(&range->refcount, drm_gpusvm_range_destroy); in drm_gpusvm_range_put()
1176 * drm_gpusvm_pages_valid() - GPU SVM pages valid
1181 * called holding gpusvm->notifier_lock and as the last step before committing a
1192 lockdep_assert_held(&gpusvm->notifier_lock); in drm_gpusvm_pages_valid()
1194 return svm_pages->flags.has_devmem_pages || svm_pages->flags.has_dma_mapping; in drm_gpusvm_pages_valid()
1198 * drm_gpusvm_range_pages_valid() - GPU SVM range pages valid
1203 * called holding gpusvm->notifier_lock and as the last step before committing a
1214 return drm_gpusvm_pages_valid(gpusvm, &range->pages); in drm_gpusvm_range_pages_valid()
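A minimal sketch of the intended call pattern, matching the fault-handler example in the header comment; driver_commit_bind() is a hypothetical driver helper:

static int driver_bind_range(struct drm_gpusvm *gpusvm,
			     struct drm_gpusvm_range *range)
{
	int err = 0;

	/* Last step before committing a GPU binding, under the notifier lock. */
	drm_gpusvm_notifier_lock(gpusvm);
	if (drm_gpusvm_range_pages_valid(gpusvm, range))
		err = driver_commit_bind(gpusvm, range);	/* hypothetical */
	else
		err = -EAGAIN;	/* CPU mappings changed, retry the fault */
	drm_gpusvm_notifier_unlock(gpusvm);

	return err;
}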
 1219 * drm_gpusvm_pages_valid_unlocked() - GPU SVM pages valid unlocked
1224 * called without holding gpusvm->notifier_lock.
1233 if (!svm_pages->dma_addr) in drm_gpusvm_pages_valid_unlocked()
1246 * drm_gpusvm_get_pages() - Get pages and populate GPU SVM pages struct
1248 * @svm_pages: The SVM pages to populate. This will contain the dma-addresses
1253 * @ctx: GPU SVM context
1265 const struct drm_gpusvm_ctx *ctx) in drm_gpusvm_get_pages() argument
1268 .default_flags = HMM_PFN_REQ_FAULT | (ctx->read_only ? 0 : in drm_gpusvm_get_pages()
1273 .dev_private_owner = ctx->device_private_page_owner, in drm_gpusvm_get_pages()
1287 enum dma_data_direction dma_dir = ctx->read_only ? DMA_TO_DEVICE : in drm_gpusvm_get_pages()
1297 return -ENOMEM; in drm_gpusvm_get_pages()
1300 err = -EFAULT; in drm_gpusvm_get_pages()
1310 if (err == -EBUSY) { in drm_gpusvm_get_pages()
1332 flags.__flags = svm_pages->flags.__flags; in drm_gpusvm_get_pages()
1335 err = -EFAULT; in drm_gpusvm_get_pages()
1345 if (!svm_pages->dma_addr) { in drm_gpusvm_get_pages()
1348 svm_pages->dma_addr = in drm_gpusvm_get_pages()
1349 kvmalloc_array(npages, sizeof(*svm_pages->dma_addr), GFP_KERNEL); in drm_gpusvm_get_pages()
1350 if (!svm_pages->dma_addr) { in drm_gpusvm_get_pages()
1351 err = -ENOMEM; in drm_gpusvm_get_pages()
1366 if (zdd != page->zone_device_data && i > 0) { in drm_gpusvm_get_pages()
1367 err = -EOPNOTSUPP; in drm_gpusvm_get_pages()
1370 zdd = page->zone_device_data; in drm_gpusvm_get_pages()
1373 err = -EOPNOTSUPP; in drm_gpusvm_get_pages()
1379 if (drm_WARN_ON(gpusvm->drm, !dpagemap)) { in drm_gpusvm_get_pages()
1385 err = -EAGAIN; in drm_gpusvm_get_pages()
1389 svm_pages->dma_addr[j] = in drm_gpusvm_get_pages()
1390 dpagemap->ops->device_map(dpagemap, in drm_gpusvm_get_pages()
1391 gpusvm->drm->dev, in drm_gpusvm_get_pages()
1394 if (dma_mapping_error(gpusvm->drm->dev, in drm_gpusvm_get_pages()
1395 svm_pages->dma_addr[j].addr)) { in drm_gpusvm_get_pages()
1396 err = -EFAULT; in drm_gpusvm_get_pages()
1403 err = -EOPNOTSUPP; in drm_gpusvm_get_pages()
1407 if (ctx->devmem_only) { in drm_gpusvm_get_pages()
1408 err = -EFAULT; in drm_gpusvm_get_pages()
1412 addr = dma_map_page(gpusvm->drm->dev, in drm_gpusvm_get_pages()
1416 if (dma_mapping_error(gpusvm->drm->dev, addr)) { in drm_gpusvm_get_pages()
1417 err = -EFAULT; in drm_gpusvm_get_pages()
1421 svm_pages->dma_addr[j] = drm_pagemap_addr_encode in drm_gpusvm_get_pages()
1432 svm_pages->dpagemap = dpagemap; in drm_gpusvm_get_pages()
1436 WRITE_ONCE(svm_pages->flags.__flags, flags.__flags); in drm_gpusvm_get_pages()
1441 svm_pages->notifier_seq = hmm_range.notifier_seq; in drm_gpusvm_get_pages()
1450 if (err == -EAGAIN) in drm_gpusvm_get_pages()
1457 * drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range
1460 * @ctx: GPU SVM context
1469 const struct drm_gpusvm_ctx *ctx) in drm_gpusvm_range_get_pages() argument
1471 return drm_gpusvm_get_pages(gpusvm, &range->pages, gpusvm->mm, in drm_gpusvm_range_get_pages()
1472 &range->notifier->notifier, in drm_gpusvm_range_get_pages()
1474 drm_gpusvm_range_end(range), ctx); in drm_gpusvm_range_get_pages()
 1479 * drm_gpusvm_unmap_pages() - Unmap GPU SVM pages
1483 * @ctx: GPU SVM context
1486 * @in_notifier is set, it is assumed that gpusvm->notifier_lock is held in
1487 * write mode; if it is clear, it acquires gpusvm->notifier_lock in read mode.
1489 * IOMMU security model.
1494 const struct drm_gpusvm_ctx *ctx) in drm_gpusvm_unmap_pages() argument
1496 if (ctx->in_notifier) in drm_gpusvm_unmap_pages()
1497 lockdep_assert_held_write(&gpusvm->notifier_lock); in drm_gpusvm_unmap_pages()
1503 if (!ctx->in_notifier) in drm_gpusvm_unmap_pages()
1509 * drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range
1512 * @ctx: GPU SVM context
1515 * is set, it is assumed that gpusvm->notifier_lock is held in write mode; if it
1516 * is clear, it acquires gpusvm->notifier_lock in read mode. Must be called on
1517 * each GPU SVM range attached to notifier in gpusvm->ops->invalidate for IOMMU
1522 const struct drm_gpusvm_ctx *ctx) in drm_gpusvm_range_unmap_pages() argument
1527 return drm_gpusvm_unmap_pages(gpusvm, &range->pages, npages, ctx); in drm_gpusvm_range_unmap_pages()
1532 * drm_gpusvm_range_evict() - Evict GPU SVM range
1543 struct mmu_interval_notifier *notifier = &range->notifier->notifier; in drm_gpusvm_range_evict()
1557 struct mm_struct *mm = gpusvm->mm; in drm_gpusvm_range_evict()
1560 return -EFAULT; in drm_gpusvm_range_evict()
1564 return -ENOMEM; in drm_gpusvm_range_evict()
1570 err = -ETIME; in drm_gpusvm_range_evict()
1577 if (err != -EBUSY) in drm_gpusvm_range_evict()
1589 * drm_gpusvm_has_mapping() - Check if GPU SVM has mapping for the given address range
1613 * drm_gpusvm_range_set_unmapped() - Mark a GPU SVM range as unmapped
1623 lockdep_assert_held_write(&range->gpusvm->notifier_lock); in drm_gpusvm_range_set_unmapped()
1625 range->pages.flags.unmapped = true; in drm_gpusvm_range_set_unmapped()
1626 if (drm_gpusvm_range_start(range) < mmu_range->start || in drm_gpusvm_range_set_unmapped()
1627 drm_gpusvm_range_end(range) > mmu_range->end) in drm_gpusvm_range_set_unmapped()
1628 range->pages.flags.partial_unmap = true; in drm_gpusvm_range_set_unmapped()