Lines Matching full:range

46  *	space is chosen for the range size. Ranges are expected to be
53 * range allocation, notifier allocation, and invalidations.
74 * optionally migrate the range to device memory, and create GPU bindings.
92 * range RB tree and list, as well as the range's DMA mappings and sequence
94 * except for rechecking that the range's pages are valid
115 * being that a subset of the range still has CPU and GPU mappings. If the
116 * backing store for the range is in device memory, a subset of the backing
117 * store has references. One option would be to split the range and device
119 * complicated. Given that partial unmappings are rare and driver-defined range
122 * With no support for range splitting, upon partial unmapping of a range, the
123 * driver is expected to invalidate and destroy the entire range. If the range
143 * int driver_bind_range(struct drm_gpusvm *gpusvm, struct drm_gpusvm_range *range)
147 * driver_alloc_and_setup_memory_for_bind(gpusvm, range);
150 * if (drm_gpusvm_range_pages_valid(range))
151 * driver_commit_bind(gpusvm, range);
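
The four fragments above come from the driver_bind_range() example in this file's header comment. A hedged reconstruction of that example follows; driver_alloc_and_setup_memory_for_bind() and driver_commit_bind() are illustrative driver-side names, drm_gpusvm_notifier_lock()/drm_gpusvm_notifier_unlock() are assumed helpers around gpusvm->notifier_lock, and the validity recheck is written with the two-argument form matching drm_gpusvm_range_pages_valid() as defined later in this listing.

int driver_bind_range(struct drm_gpusvm *gpusvm, struct drm_gpusvm_range *range)
{
	int err = 0;

	driver_alloc_and_setup_memory_for_bind(gpusvm, range);

	drm_gpusvm_notifier_lock(gpusvm);
	if (drm_gpusvm_range_pages_valid(gpusvm, range))
		driver_commit_bind(gpusvm, range);
	else
		err = -EAGAIN;	/* pages were invalidated; caller retries */
	drm_gpusvm_notifier_unlock(gpusvm);

	return err;
}
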
170 * range = drm_gpusvm_range_find_or_insert(gpusvm, fault_addr,
173 * if (IS_ERR(range)) {
174 * err = PTR_ERR(range);
178 * if (driver_migration_policy(range)) {
186 * err = drm_gpusvm_range_get_pages(gpusvm, range, &ctx);
189 * drm_gpusvm_range_evict(gpusvm, range);
195 * err = driver_bind_range(gpusvm, range);
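
The fragments above belong to the example GPU page-fault flow in the header comment. A hedged sketch of that flow is given below; driver_svm_lock()/driver_svm_unlock(), driver_migration_policy(), driver_migrate_to_devmem(), driver_err_means_cpu_mappings_changed() and driver_bind_range() are illustrative driver-side names, and the gpuva_start/gpuva_end/ctx arguments to drm_gpusvm_range_find_or_insert() are assumed from context rather than quoted.

int driver_gpu_fault(struct drm_gpusvm *gpusvm, unsigned long fault_addr,
		     unsigned long gpuva_start, unsigned long gpuva_end)
{
	struct drm_gpusvm_ctx ctx = {};
	struct drm_gpusvm_range *range;
	int err;

	driver_svm_lock();
retry:
	range = drm_gpusvm_range_find_or_insert(gpusvm, fault_addr,
						gpuva_start, gpuva_end, &ctx);
	if (IS_ERR(range)) {
		err = PTR_ERR(range);
		goto unlock;
	}

	if (driver_migration_policy(range))
		driver_migrate_to_devmem(gpusvm, range);	/* failure falls back to system pages */

	err = drm_gpusvm_range_get_pages(gpusvm, range, &ctx);
	if (err) {
		/* assumption: errors meaning the CPU mappings changed trigger a retry */
		if (driver_err_means_cpu_mappings_changed(err)) {
			drm_gpusvm_range_evict(gpusvm, range);
			goto retry;
		}
		goto unlock;
	}

	err = driver_bind_range(gpusvm, range);
	if (err == -EAGAIN)	/* raced with an invalidation */
		goto retry;

unlock:
	driver_svm_unlock();
	return err;
}
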
209 * struct drm_gpusvm_range *range)
214 * if (range->flags.partial_unmap)
215 * drm_gpusvm_range_evict(gpusvm, range);
217 * driver_unbind_range(range);
218 * drm_gpusvm_range_remove(gpusvm, range);
225 * for_each_range_in_garbage_collector(gpusvm, range)
226 * __driver_garbage_collector(gpusvm, range);
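
A hedged reconstruction of the garbage-collector example the fragments above come from; assert_driver_svm_locked(), driver_unbind_range() and for_each_range_in_garbage_collector() are illustrative driver-side names. The key point is that a partially unmapped range is evicted and then destroyed as a whole rather than split.

void __driver_garbage_collector(struct drm_gpusvm *gpusvm,
				struct drm_gpusvm_range *range)
{
	assert_driver_svm_locked(gpusvm);

	/* partial unmap: move any remaining device memory back to system RAM */
	if (range->flags.partial_unmap)
		drm_gpusvm_range_evict(gpusvm, range);

	driver_unbind_range(range);
	drm_gpusvm_range_remove(gpusvm, range);
}

void driver_garbage_collector(struct drm_gpusvm *gpusvm)
{
	struct drm_gpusvm_range *range;

	assert_driver_svm_locked(gpusvm);

	for_each_range_in_garbage_collector(gpusvm, range)
		__driver_garbage_collector(gpusvm, range);
}
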
238 * struct drm_gpusvm_range *range = NULL;
242 * drm_gpusvm_for_each_range(range, notifier, mmu_range->start,
244 * drm_gpusvm_range_unmap_pages(gpusvm, range, &ctx);
249 * drm_gpusvm_range_set_unmapped(range, mmu_range);
250 * driver_garbage_collector_add(gpusvm, range);
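
A hedged sketch of the notifier invalidation callback the fragments above come from; driver_invalidation(), driver_invalidate_device_pages() and driver_garbage_collector_add() are illustrative driver-side names, and spelling the context flag as ctx.in_notifier is an assumption based on the @in_notifier reference later in this listing.

void driver_invalidation(struct drm_gpusvm *gpusvm,
			 struct drm_gpusvm_notifier *notifier,
			 const struct mmu_notifier_range *mmu_range)
{
	struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
	struct drm_gpusvm_range *range = NULL;

	driver_invalidate_device_pages(gpusvm, mmu_range->start, mmu_range->end);

	drm_gpusvm_for_each_range(range, notifier, mmu_range->start,
				  mmu_range->end) {
		drm_gpusvm_range_unmap_pages(gpusvm, range, &ctx);

		if (mmu_range->event != MMU_NOTIFY_UNMAP)
			continue;

		drm_gpusvm_range_set_unmapped(range, mmu_range);
		driver_garbage_collector_add(gpusvm, range);
	}
}
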
256 * npages_in_range() - Calculate the number of pages in a given range
257 * @start: The start address of the range
258 * @end: The end address of the range
260 * This function calculates the number of pages in a given memory range,
263 * determine the number of pages in the range.
265 * Return: The number of pages in the specified range.
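
A minimal sketch of the page-count arithmetic described above, assuming page-aligned start and end addresses; it mirrors, rather than quotes, the helper's body.

static unsigned long npages_in_range(unsigned long start, unsigned long end)
{
	/* byte length of [start, end) converted to pages */
	return (end - start) >> PAGE_SHIFT;
}

/* e.g. start = 0x10000, end = 0x20000 -> 16 pages with 4 KiB pages */
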
274 * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
276 * @start: Start address of the range
277 * @end: End address of the range
301 * @start__: Start address of the range
302 * @end__: End address of the range
423 * @mm_range: Range of the GPU SVM.
426 * @chunk_sizes: Pointer to the array of chunk sizes used in range allocation.
552 struct drm_gpusvm_range *range, *__next; in drm_gpusvm_fini() local
560 drm_gpusvm_for_each_range_safe(range, __next, notifier, 0, in drm_gpusvm_fini()
562 drm_gpusvm_range_remove(gpusvm, range); in drm_gpusvm_fini()
632 * drm_gpusvm_range_insert() - Insert GPU SVM range
634 * @range: Pointer to the GPU SVM range structure
636 * This function inserts the GPU SVM range into the notifier RB tree and list.
639 struct drm_gpusvm_range *range) in drm_gpusvm_range_insert() argument
645 interval_tree_insert(&range->itree, &notifier->root); in drm_gpusvm_range_insert()
647 node = rb_prev(&range->itree.rb); in drm_gpusvm_range_insert()
653 list_add(&range->entry, head); in drm_gpusvm_range_insert()
658 * __drm_gpusvm_range_remove() - Remove GPU SVM range
660 * @range: Pointer to the GPU SVM range structure
662 * This function removes the GPU SVM range from the notifier RB tree and list.
665 struct drm_gpusvm_range *range) in __drm_gpusvm_range_remove() argument
667 interval_tree_remove(&range->itree, &notifier->root); in __drm_gpusvm_range_remove()
668 list_del(&range->entry); in __drm_gpusvm_range_remove()
672 * drm_gpusvm_range_alloc() - Allocate GPU SVM range
679 * This function allocates and initializes the GPU SVM range structure.
681 * Return: Pointer to the allocated GPU SVM range on success, ERR_PTR() on failure.
689 struct drm_gpusvm_range *range; in drm_gpusvm_range_alloc() local
692 range = gpusvm->ops->range_alloc(gpusvm); in drm_gpusvm_range_alloc()
694 range = kzalloc(sizeof(*range), GFP_KERNEL); in drm_gpusvm_range_alloc()
696 if (!range) in drm_gpusvm_range_alloc()
699 kref_init(&range->refcount); in drm_gpusvm_range_alloc()
700 range->gpusvm = gpusvm; in drm_gpusvm_range_alloc()
701 range->notifier = notifier; in drm_gpusvm_range_alloc()
702 range->itree.start = ALIGN_DOWN(fault_addr, chunk_size); in drm_gpusvm_range_alloc()
703 range->itree.last = ALIGN(fault_addr + 1, chunk_size) - 1; in drm_gpusvm_range_alloc()
704 INIT_LIST_HEAD(&range->entry); in drm_gpusvm_range_alloc()
705 range->notifier_seq = LONG_MAX; in drm_gpusvm_range_alloc()
706 range->flags.migrate_devmem = migrate_devmem ? 1 : 0; in drm_gpusvm_range_alloc()
708 return range; in drm_gpusvm_range_alloc()
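
The alignment in drm_gpusvm_range_alloc() above is easiest to see with concrete (illustrative) numbers:

/*
 * Worked example of the alignment above (illustrative values):
 *   fault_addr = 0x12345, chunk_size = 0x10000
 *   itree.start = ALIGN_DOWN(0x12345, 0x10000)    = 0x10000
 *   itree.last  = ALIGN(0x12345 + 1, 0x10000) - 1 = 0x1ffff
 * The faulting address always lands inside a chunk-aligned, inclusive
 * [start, last] interval.
 */
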
778 * drm_gpusvm_range_chunk_size() - Determine chunk size for GPU SVM range
787 * This function determines the chunk size for the GPU SVM range based on the
825 struct drm_gpusvm_range *range; in drm_gpusvm_range_chunk_size() local
827 range = drm_gpusvm_range_find(notifier, start, end); in drm_gpusvm_range_chunk_size()
828 if (range) { in drm_gpusvm_range_chunk_size()
834 * XXX: Only create range on pages CPU has faulted in. Without in drm_gpusvm_range_chunk_size()
875 * drm_gpusvm_find_vma_start() - Find start address for first VMA in range
880 * Return: The start address of the first VMA within the provided range,
909 * drm_gpusvm_range_find_or_insert() - Find or insert GPU SVM range
916 * This function finds or inserts a newly allocated GPU SVM range based on the
917 * fault address. Caller must hold a lock to protect range lookup and insertion.
919 * Return: Pointer to the GPU SVM range on success, ERR_PTR() on failure.
929 struct drm_gpusvm_range *range; in drm_gpusvm_range_find_or_insert() local
976 range = drm_gpusvm_range_find(notifier, fault_addr, fault_addr + 1); in drm_gpusvm_range_find_or_insert()
977 if (range) in drm_gpusvm_range_find_or_insert()
996 range = drm_gpusvm_range_alloc(gpusvm, notifier, fault_addr, chunk_size, in drm_gpusvm_range_find_or_insert()
998 if (IS_ERR(range)) { in drm_gpusvm_range_find_or_insert()
999 err = PTR_ERR(range); in drm_gpusvm_range_find_or_insert()
1003 drm_gpusvm_range_insert(notifier, range); in drm_gpusvm_range_find_or_insert()
1011 return range; in drm_gpusvm_range_find_or_insert()
1027 * __drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range (internal)
1029 * @range: Pointer to the GPU SVM range structure
1032 * This function unmaps pages associated with a GPU SVM range. Assumes and
1036 struct drm_gpusvm_range *range, in __drm_gpusvm_range_unmap_pages() argument
1040 struct drm_pagemap *dpagemap = range->dpagemap; in __drm_gpusvm_range_unmap_pages()
1045 if (range->flags.has_dma_mapping) { in __drm_gpusvm_range_unmap_pages()
1047 .__flags = range->flags.__flags, in __drm_gpusvm_range_unmap_pages()
1051 struct drm_pagemap_device_addr *addr = &range->dma_addr[j]; in __drm_gpusvm_range_unmap_pages()
1067 WRITE_ONCE(range->flags.__flags, flags.__flags); in __drm_gpusvm_range_unmap_pages()
1069 range->dpagemap = NULL; in __drm_gpusvm_range_unmap_pages()
1074 * drm_gpusvm_range_free_pages() - Free pages associated with a GPU SVM range
1076 * @range: Pointer to the GPU SVM range structure
1078 * This function frees the dma address array associated with a GPU SVM range.
1081 struct drm_gpusvm_range *range) in drm_gpusvm_range_free_pages() argument
1085 if (range->dma_addr) { in drm_gpusvm_range_free_pages()
1086 kvfree(range->dma_addr); in drm_gpusvm_range_free_pages()
1087 range->dma_addr = NULL; in drm_gpusvm_range_free_pages()
1092 * drm_gpusvm_range_remove() - Remove GPU SVM range
1094 * @range: Pointer to the GPU SVM range to be removed
1096 * This function removes the specified GPU SVM range and also removes the parent
1098 * hold a lock to protect range and notifier removal.
1101 struct drm_gpusvm_range *range) in drm_gpusvm_range_remove() argument
1103 unsigned long npages = npages_in_range(drm_gpusvm_range_start(range), in drm_gpusvm_range_remove()
1104 drm_gpusvm_range_end(range)); in drm_gpusvm_range_remove()
1110 drm_gpusvm_range_start(range)); in drm_gpusvm_range_remove()
1115 __drm_gpusvm_range_unmap_pages(gpusvm, range, npages); in drm_gpusvm_range_remove()
1116 drm_gpusvm_range_free_pages(gpusvm, range); in drm_gpusvm_range_remove()
1117 __drm_gpusvm_range_remove(notifier, range); in drm_gpusvm_range_remove()
1120 drm_gpusvm_range_put(range); in drm_gpusvm_range_remove()
1132 * drm_gpusvm_range_get() - Get a reference to GPU SVM range
1133 * @range: Pointer to the GPU SVM range
1135 * This function increments the reference count of the specified GPU SVM range.
1137 * Return: Pointer to the GPU SVM range.
1140 drm_gpusvm_range_get(struct drm_gpusvm_range *range) in drm_gpusvm_range_get() argument
1142 kref_get(&range->refcount); in drm_gpusvm_range_get()
1144 return range; in drm_gpusvm_range_get()
1149 * drm_gpusvm_range_destroy() - Destroy GPU SVM range
1150 * @refcount: Pointer to the reference counter embedded in the GPU SVM range
1152 * This function destroys the specified GPU SVM range when its reference count
1153 * reaches zero. If a custom range-free function is provided, it is invoked to
1154 * free the range; otherwise, the range is deallocated using kfree().
1158 struct drm_gpusvm_range *range = in drm_gpusvm_range_destroy() local
1160 struct drm_gpusvm *gpusvm = range->gpusvm; in drm_gpusvm_range_destroy()
1163 gpusvm->ops->range_free(range); in drm_gpusvm_range_destroy()
1165 kfree(range); in drm_gpusvm_range_destroy()
1169 * drm_gpusvm_range_put() - Put a reference to GPU SVM range
1170 * @range: Pointer to the GPU SVM range
1172 * This function decrements the reference count of the specified GPU SVM range
1175 void drm_gpusvm_range_put(struct drm_gpusvm_range *range) in drm_gpusvm_range_put() argument
1177 kref_put(&range->refcount, drm_gpusvm_range_destroy); in drm_gpusvm_range_put()
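
A short, hypothetical usage pattern for the get/put pair above: a caller that keeps a range pointer beyond the lock that made its lookup safe takes its own reference.

static void driver_use_range_later(struct drm_gpusvm_range *range)
{
	/* take a reference while the lookup is still protected */
	struct drm_gpusvm_range *r = drm_gpusvm_range_get(range);

	/* ... the lookup lock may be dropped; r stays valid ... */

	drm_gpusvm_range_put(r);	/* final put ends in drm_gpusvm_range_destroy() */
}
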
1182 * drm_gpusvm_range_pages_valid() - GPU SVM range pages valid
1184 * @range: Pointer to the GPU SVM range structure
1186 * This function determines if a GPU SVM range's pages are valid. Expected to be
1190 * function is required for finer grained checking (i.e., per range) if pages
1193 * Return: True if GPU SVM range has valid pages, False otherwise
1196 struct drm_gpusvm_range *range) in drm_gpusvm_range_pages_valid() argument
1200 return range->flags.has_devmem_pages || range->flags.has_dma_mapping; in drm_gpusvm_range_pages_valid()
1205 * drm_gpusvm_range_pages_valid_unlocked() - GPU SVM range pages valid unlocked
1207 * @range: Pointer to the GPU SVM range structure
1209 * This function determines if a GPU SVM range's pages are valid. Expected to be
1212 * Return: True if GPU SVM range has valid pages, False otherwise
1216 struct drm_gpusvm_range *range) in drm_gpusvm_range_pages_valid_unlocked() argument
1220 if (!range->dma_addr) in drm_gpusvm_range_pages_valid_unlocked()
1224 pages_valid = drm_gpusvm_range_pages_valid(gpusvm, range); in drm_gpusvm_range_pages_valid_unlocked()
1226 drm_gpusvm_range_free_pages(gpusvm, range); in drm_gpusvm_range_pages_valid_unlocked()
1233 * drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range
1235 * @range: Pointer to the GPU SVM range structure
1238 * This function gets pages for a GPU SVM range and ensures they are mapped for
1244 struct drm_gpusvm_range *range, in drm_gpusvm_range_get_pages() argument
1247 struct mmu_interval_notifier *notifier = &range->notifier->notifier; in drm_gpusvm_range_get_pages()
1252 .start = drm_gpusvm_range_start(range), in drm_gpusvm_range_get_pages()
1253 .end = drm_gpusvm_range_end(range), in drm_gpusvm_range_get_pages()
1261 unsigned long npages = npages_in_range(drm_gpusvm_range_start(range), in drm_gpusvm_range_get_pages()
1262 drm_gpusvm_range_end(range)); in drm_gpusvm_range_get_pages()
1273 if (drm_gpusvm_range_pages_valid_unlocked(gpusvm, range)) in drm_gpusvm_range_get_pages()
1313 flags.__flags = range->flags.__flags; in drm_gpusvm_range_get_pages()
1326 if (!range->dma_addr) { in drm_gpusvm_range_get_pages()
1329 range->dma_addr = kvmalloc_array(npages, in drm_gpusvm_range_get_pages()
1330 sizeof(*range->dma_addr), in drm_gpusvm_range_get_pages()
1332 if (!range->dma_addr) { in drm_gpusvm_range_get_pages()
1371 range->dma_addr[j] = in drm_gpusvm_range_get_pages()
1377 range->dma_addr[j].addr)) { in drm_gpusvm_range_get_pages()
1403 range->dma_addr[j] = drm_pagemap_device_addr_encode in drm_gpusvm_range_get_pages()
1414 range->dpagemap = dpagemap; in drm_gpusvm_range_get_pages()
1418 WRITE_ONCE(range->flags.__flags, flags.__flags); in drm_gpusvm_range_get_pages()
1423 range->notifier_seq = hmm_range.notifier_seq; in drm_gpusvm_range_get_pages()
1428 __drm_gpusvm_range_unmap_pages(gpusvm, range, num_dma_mapped); in drm_gpusvm_range_get_pages()
1439 * drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range
1442 * @range: Pointer to the GPU SVM range structure
1445 * This function unmaps pages associated with a GPU SVM range. If @in_notifier
1448 * each GPU SVM range attached to notifier in gpusvm->ops->invalidate for IOMMU
1452 struct drm_gpusvm_range *range, in drm_gpusvm_range_unmap_pages() argument
1455 unsigned long npages = npages_in_range(drm_gpusvm_range_start(range), in drm_gpusvm_range_unmap_pages()
1456 drm_gpusvm_range_end(range)); in drm_gpusvm_range_unmap_pages()
1463 __drm_gpusvm_range_unmap_pages(gpusvm, range, npages); in drm_gpusvm_range_unmap_pages()
1471 * drm_gpusvm_range_evict() - Evict GPU SVM range
1473 * @range: Pointer to the GPU SVM range to be removed
1475 * This function evicts the specified GPU SVM range.
1480 struct drm_gpusvm_range *range) in drm_gpusvm_range_evict() argument
1482 struct mmu_interval_notifier *notifier = &range->notifier->notifier; in drm_gpusvm_range_evict()
1486 .start = drm_gpusvm_range_start(range), in drm_gpusvm_range_evict()
1487 .end = drm_gpusvm_range_end(range), in drm_gpusvm_range_evict()
1493 unsigned long npages = npages_in_range(drm_gpusvm_range_start(range), in drm_gpusvm_range_evict()
1494 drm_gpusvm_range_end(range)); in drm_gpusvm_range_evict()
1528 * drm_gpusvm_has_mapping() - Check if GPU SVM has mapping for the given address range
1541 struct drm_gpusvm_range *range = NULL; in drm_gpusvm_has_mapping() local
1543 drm_gpusvm_for_each_range(range, notifier, start, end) in drm_gpusvm_has_mapping()
1552 * drm_gpusvm_range_set_unmapped() - Mark a GPU SVM range as unmapped
1553 * @range: Pointer to the GPU SVM range structure.
1554 * @mmu_range: Pointer to the MMU notifier range structure.
1556 * This function marks a GPU SVM range as unmapped and sets the partial_unmap flag
1557 * if the range partially falls within the provided MMU notifier range.
1559 void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range, in drm_gpusvm_range_set_unmapped() argument
1562 lockdep_assert_held_write(&range->gpusvm->notifier_lock); in drm_gpusvm_range_set_unmapped()
1564 range->flags.unmapped = true; in drm_gpusvm_range_set_unmapped()
1565 if (drm_gpusvm_range_start(range) < mmu_range->start || in drm_gpusvm_range_set_unmapped()
1566 drm_gpusvm_range_end(range) > mmu_range->end) in drm_gpusvm_range_set_unmapped()
1567 range->flags.partial_unmap = true; in drm_gpusvm_range_set_unmapped()
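
Illustration of the partial-unmap check above (hypothetical values): if the range spans [0x10000, 0x20000) and the MMU notifier range covers [0x18000, 0x20000), then drm_gpusvm_range_start(range) (0x10000) is below mmu_range->start (0x18000), so only part of the range was unmapped and flags.partial_unmap is set in addition to flags.unmapped.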