Lines Matching full:range
22 static bool xe_svm_range_in_vram(struct xe_svm_range *range) in xe_svm_range_in_vram() argument
25 * Advisory only check whether the range is currently backed by VRAM in xe_svm_range_in_vram()
31 .__flags = READ_ONCE(range->base.pages.flags.__flags), in xe_svm_range_in_vram()
37 static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range) in xe_svm_range_has_vram_binding() argument
40 return xe_svm_range_in_vram(range) && range->tile_present; in xe_svm_range_has_vram_binding()
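The check above snapshots the page flags once (READ_ONCE() at line 31), so the answer is advisory and can go stale immediately; a VRAM binding additionally requires a non-empty tile_present mask (line 40). A minimal user-space sketch of that snapshot-then-test pattern, assuming a simplified stand-in struct (the flag bit and field layout here are illustrative, not the driver's):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for struct xe_svm_range: only the two fields the
     * advisory check needs are modeled; the flag encoding is assumed. */
    struct demo_range {
        _Atomic unsigned int flags;   /* bit 0: has_devmem_pages (assumed bit) */
        unsigned int tile_present;    /* mask of tiles with GPU bindings */
    };

    #define DEMO_HAS_DEVMEM_PAGES 0x1u

    /* Advisory only: the flags are snapshotted once (READ_ONCE analogue) and
     * may change right after, so callers must tolerate a stale answer. */
    static bool demo_range_in_vram(struct demo_range *range)
    {
        unsigned int flags = atomic_load_explicit(&range->flags, memory_order_relaxed);

        return (flags & DEMO_HAS_DEVMEM_PAGES) != 0;
    }

    static bool demo_range_has_vram_binding(struct demo_range *range)
    {
        /* VRAM pages alone are not enough; some tile must still map them. */
        return demo_range_in_vram(range) && range->tile_present;
    }

    int main(void)
    {
        struct demo_range r = { .tile_present = 0x1 };

        atomic_store_explicit(&r.flags, DEMO_HAS_DEVMEM_PAGES, memory_order_relaxed);
        printf("in_vram=%d has_binding=%d\n",
               demo_range_in_vram(&r), demo_range_has_vram_binding(&r));
        return 0;
    }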
65 void xe_svm_range_debug(struct xe_svm_range *range, const char *operation) in xe_svm_range_debug() argument
67 range_debug(range, operation); in xe_svm_range_debug()
73 struct xe_svm_range *range; in xe_svm_range_alloc() local
75 range = kzalloc(sizeof(*range), GFP_KERNEL); in xe_svm_range_alloc()
76 if (!range) in xe_svm_range_alloc()
79 INIT_LIST_HEAD(&range->garbage_collector_link); in xe_svm_range_alloc()
82 return &range->base; in xe_svm_range_alloc()
85 static void xe_svm_range_free(struct drm_gpusvm_range *range) in xe_svm_range_free() argument
87 xe_vm_put(range_to_vm(range)); in xe_svm_range_free()
88 kfree(range); in xe_svm_range_free()
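xe_svm_range embeds a generic drm_gpusvm_range as its base: the allocator zero-initializes the wrapper, prepares the garbage-collector link, and hands back only the embedded base (line 82), while the free path recovers the wrapper again before releasing it. A sketch of that embed-and-recover pattern with hypothetical demo types (container_of is re-implemented locally for the example):

    #include <stddef.h>
    #include <stdlib.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the embedded-base pattern in the listing. */
    struct demo_base { unsigned long start, end; };

    struct demo_range {
        struct demo_base base;        /* handed out to generic code */
        struct demo_range *gc_next;   /* stand-in for garbage_collector_link */
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct demo_base *demo_range_alloc(void)
    {
        struct demo_range *range = calloc(1, sizeof(*range)); /* kzalloc analogue */

        if (!range)
            return NULL;
        return &range->base;          /* caller only sees the base */
    }

    static void demo_range_free(struct demo_base *base)
    {
        /* Recover the wrapper from the embedded base, then free it. */
        struct demo_range *range = container_of(base, struct demo_range, base);

        free(range);
    }

    int main(void)
    {
        struct demo_base *b = demo_range_alloc();

        if (b) {
            printf("allocated, start=%lu\n", b->start);
            demo_range_free(b);
        }
        return 0;
    }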
92 xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range, in xe_svm_garbage_collector_add_range() argument
97 range_debug(range, "GARBAGE COLLECTOR ADD"); in xe_svm_garbage_collector_add_range()
99 drm_gpusvm_range_set_unmapped(&range->base, mmu_range); in xe_svm_garbage_collector_add_range()
102 if (list_empty(&range->garbage_collector_link)) in xe_svm_garbage_collector_add_range()
103 list_add_tail(&range->garbage_collector_link, in xe_svm_garbage_collector_add_range()
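When a CPU-side invalidation hits a range, the range is marked unmapped and queued for deferred teardown; the list_empty() test on the range's own link (line 102) is what keeps a range from being queued twice. A small sketch of that "enqueue only if not already linked" idiom, using a locally re-implemented circular list rather than the kernel's list_head:

    #include <stdbool.h>
    #include <stdio.h>

    /* Minimal re-implementation of a circular doubly linked list for the demo. */
    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
    static bool list_empty(const struct list_head *h) { return h->next == h; }

    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
    }

    struct demo_range { struct list_head gc_link; };

    static void gc_add_range(struct list_head *gc_list, struct demo_range *range)
    {
        /* An initialized-but-unlinked node points at itself, so list_empty()
         * on the node doubles as an "already queued?" test. */
        if (list_empty(&range->gc_link))
            list_add_tail(&range->gc_link, gc_list);
    }

    int main(void)
    {
        struct list_head gc_list;
        struct demo_range r;

        INIT_LIST_HEAD(&gc_list);
        INIT_LIST_HEAD(&r.gc_link);
        gc_add_range(&gc_list, &r);
        gc_add_range(&gc_list, &r);   /* second add is a no-op */
        printf("queued exactly once: %d\n",
               gc_list.next == &r.gc_link && gc_list.prev == &r.gc_link);
        return 0;
    }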
121 struct xe_svm_range *range = to_xe_range(r); in xe_svm_range_notifier_event_begin() local
129 range_debug(range, "NOTIFIER"); in xe_svm_range_notifier_event_begin()
132 if (range->base.pages.flags.unmapped || !range->tile_present) in xe_svm_range_notifier_event_begin()
135 range_debug(range, "NOTIFIER - EXECUTE"); in xe_svm_range_notifier_event_begin()
137 /* Adjust invalidation to range boundaries */ in xe_svm_range_notifier_event_begin()
138 *adj_start = min(xe_svm_range_start(range), mmu_range->start); in xe_svm_range_notifier_event_begin()
139 *adj_end = max(xe_svm_range_end(range), mmu_range->end); in xe_svm_range_notifier_event_begin()
147 if (xe_pt_zap_ptes_range(tile, vm, range)) { in xe_svm_range_notifier_event_begin()
152 WRITE_ONCE(range->tile_invalidated, in xe_svm_range_notifier_event_begin()
153 range->tile_invalidated | BIT(id)); in xe_svm_range_notifier_event_begin()
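The notifier widens the invalidation window so it covers whole GPU ranges (min of the starts, max of the ends, lines 138-139) and, after zapping PTEs on a tile, records that tile in tile_invalidated (lines 152-153). A user-space sketch of the widening and the per-tile bit accounting, with stand-in types and the zap step reduced to a comment:

    #include <stdio.h>

    struct demo_range { unsigned long start, end; unsigned int tile_invalidated; };

    /* Widen the CPU-side invalidation [mmu_start, mmu_end) so it covers the
     * whole GPU range, then record which tile had its PTEs zapped. */
    static void demo_notifier_begin(struct demo_range *range,
                                    unsigned long mmu_start, unsigned long mmu_end,
                                    unsigned int tile_id,
                                    unsigned long *adj_start, unsigned long *adj_end)
    {
        *adj_start = range->start < mmu_start ? range->start : mmu_start;
        *adj_end   = range->end   > mmu_end   ? range->end   : mmu_end;

        /* Stand-in for xe_pt_zap_ptes_range() succeeding: mark the tile as
         * needing re-validation before the next GPU access. */
        range->tile_invalidated |= 1u << tile_id;
    }

    int main(void)
    {
        struct demo_range r = { .start = 0x10000, .end = 0x30000 };
        unsigned long s, e;

        demo_notifier_begin(&r, 0x20000, 0x28000, 0, &s, &e);
        printf("adjusted [%#lx, %#lx), tile_invalidated=%#x\n", s, e, r.tile_invalidated);
        return 0;
    }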
236 * invalidation is not required. Could walk range list twice to figure in xe_svm_invalidate()
271 struct xe_svm_range *range) in __xe_svm_garbage_collector() argument
275 range_debug(range, "GARBAGE COLLECTOR"); in __xe_svm_garbage_collector()
278 fence = xe_vm_range_unbind(vm, range); in __xe_svm_garbage_collector()
284 drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base); in __xe_svm_garbage_collector()
340 struct xe_svm_range *range; in xe_svm_garbage_collector() local
352 range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list, in xe_svm_garbage_collector()
353 typeof(*range), in xe_svm_garbage_collector()
355 if (!range) in xe_svm_garbage_collector()
358 range_start = xe_svm_range_start(range); in xe_svm_garbage_collector()
359 range_end = xe_svm_range_end(range); in xe_svm_garbage_collector()
361 list_del(&range->garbage_collector_link); in xe_svm_garbage_collector()
364 err = __xe_svm_garbage_collector(vm, range); in xe_svm_garbage_collector()
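The collector itself is a drain loop: pop the first queued range (lines 352-355), unlink it, tear it down, and repeat until the queue is empty. A sketch of that pattern with stand-in types; the actual unbind/remove step is replaced by a print:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void list_add_tail(struct list_head *n, struct list_head *head)
    {
        n->prev = head->prev; n->next = head;
        head->prev->next = n; head->prev = n;
    }

    static void list_del(struct list_head *n)
    {
        n->prev->next = n->next; n->next->prev = n->prev;
        n->next = n->prev = n;    /* re-init so an empty-node test works again */
    }

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct demo_range { unsigned long start, end; struct list_head gc_link; };

    /* Drain the deferred-destroy queue: pop, unlink, tear down, repeat. */
    static void demo_garbage_collector(struct list_head *gc_list)
    {
        for (;;) {
            struct demo_range *range;

            if (gc_list->next == gc_list)        /* list_first_entry_or_null */
                break;
            range = container_of(gc_list->next, struct demo_range, gc_link);
            list_del(&range->gc_link);
            /* Stand-in for __xe_svm_garbage_collector(): unbind + remove. */
            printf("collect [%#lx, %#lx)\n", range->start, range->end);
        }
    }

    int main(void)
    {
        struct list_head gc_list;
        struct demo_range a = { 0x1000, 0x2000 }, b = { 0x3000, 0x4000 };

        INIT_LIST_HEAD(&gc_list);
        INIT_LIST_HEAD(&a.gc_link);
        INIT_LIST_HEAD(&b.gc_link);
        list_add_tail(&a.gc_link, &gc_list);
        list_add_tail(&b.gc_link, &gc_list);
        demo_garbage_collector(&gc_list);
        return 0;
    }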
786 static bool xe_svm_range_is_valid(struct xe_svm_range *range, in xe_svm_range_is_valid() argument
790 return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present, in xe_svm_range_is_valid()
791 range->tile_invalidated) && in xe_svm_range_is_valid()
792 (!devmem_only || xe_svm_range_in_vram(range))); in xe_svm_range_is_valid()
795 /** xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
797 * @range: Pointer to the SVM range structure
799 * The xe_svm_range_migrate_to_smem() function checks whether the range has pages in VRAM
802 void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range) in xe_svm_range_migrate_to_smem() argument
804 if (xe_svm_range_in_vram(range)) in xe_svm_range_migrate_to_smem()
805 drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base); in xe_svm_range_migrate_to_smem()
809 * xe_svm_range_validate() - Check if the SVM range is valid
811 * @range: Pointer to the SVM range structure
813 * @devmem_preferred: if true, the range needs to be in devmem
815 * The xe_svm_range_validate() function checks if a range is
818 * Return: true if the range is valid, false otherwise
821 struct xe_svm_range *range, in xe_svm_range_validate() argument
828 ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask && in xe_svm_range_validate()
829 (devmem_preferred == range->base.pages.flags.has_devmem_pages); in xe_svm_range_validate()
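Both validity helpers come down to bit arithmetic on the per-tile masks: a range is usable for a set of tiles only when every requested tile is present and none of them has been invalidated, i.e. (tile_present & ~tile_invalidated & tile_mask) == tile_mask (line 828), optionally combined with the devmem-placement check (line 829). A runnable sketch of just that predicate (field names follow the listing; everything else is a stand-in):

    #include <stdbool.h>
    #include <stdio.h>

    /* A range is usable on all tiles in tile_mask only if each of those tiles
     * has a mapping (tile_present) that has not been invalidated since. */
    static bool demo_range_validate(unsigned int tile_present,
                                    unsigned int tile_invalidated,
                                    unsigned int tile_mask,
                                    bool has_devmem_pages,
                                    bool devmem_preferred)
    {
        return (tile_present & ~tile_invalidated & tile_mask) == tile_mask &&
               devmem_preferred == has_devmem_pages;
    }

    int main(void)
    {
        /* Tile 0 mapped and clean, tile 1 mapped but invalidated. */
        unsigned int present = 0x3, invalidated = 0x2;

        printf("tile0 only: %d\n", demo_range_validate(present, invalidated, 0x1, true, true));
        printf("tiles 0+1 : %d\n", demo_range_validate(present, invalidated, 0x3, true, true));
        return 0;
    }

A per-tile variant of the same two masks is what xe_svm_range_is_valid() consults at lines 790-792.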
845 * range [start, end] in the given VM. It adjusts the range based on the
848 * Return: The starting address of the VMA within the range,
926 * xe_svm_range_needs_migrate_to_vram() - Decide whether an SVM range needs migration to VRAM
927 * @range: SVM range for which migration needs to be decided
928 * @vma: vma which contains the range
929 * @preferred_region_is_vram: true if the preferred region for the range is VRAM
931 * Return: True if the range needs migration and migration is supported, false otherwise
933 bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma, in xe_svm_range_needs_migrate_to_vram() argument
936 struct xe_vm *vm = range_to_vm(&range->base); in xe_svm_range_needs_migrate_to_vram()
937 u64 range_size = xe_svm_range_size(range); in xe_svm_range_needs_migrate_to_vram()
939 if (!range->base.pages.flags.migrate_devmem || !preferred_region_is_vram) in xe_svm_range_needs_migrate_to_vram()
944 if (xe_svm_range_in_vram(range)) { in xe_svm_range_needs_migrate_to_vram()
945 drm_info(&vm->xe->drm, "Range is already in VRAM\n"); in xe_svm_range_needs_migrate_to_vram()
950 drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n"); in xe_svm_range_needs_migrate_to_vram()
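The migration decision (lines 933-950) is a chain of early returns: no migration if the pages cannot move to devmem or VRAM is not the preferred region, none if the range is already in VRAM, and none for SZ_4K ranges on platforms that cannot migrate them. A compact sketch of that decision chain; all names below are hypothetical stand-ins for the checks visible above:

    #include <stdbool.h>
    #include <stdio.h>

    #define SZ_4K 0x1000ul

    struct demo_range {
        unsigned long size;
        bool migrate_devmem;   /* pages are allowed to move to device memory */
        bool in_vram;          /* advisory: already backed by VRAM */
    };

    static bool demo_needs_migrate_to_vram(const struct demo_range *range,
                                           bool preferred_region_is_vram,
                                           bool platform_supports_4k_migration)
    {
        if (!range->migrate_devmem || !preferred_region_is_vram)
            return false;           /* not eligible or not wanted in VRAM */
        if (range->in_vram)
            return false;           /* nothing to do, already there */
        if (range->size == SZ_4K && !platform_supports_4k_migration)
            return false;           /* platform can't migrate 4K ranges */
        return true;
    }

    int main(void)
    {
        struct demo_range r = { .size = SZ_4K, .migrate_devmem = true, .in_vram = false };

        printf("migrate? %d\n", demo_needs_migrate_to_vram(&r, true, false));
        printf("migrate? %d\n", demo_needs_migrate_to_vram(&r, true, true));
        return 0;
    }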
959 struct xe_svm_range *range) \
961 switch (xe_svm_range_size(range)) { \
980 struct xe_svm_range *range, \ in DECL_SVM_RANGE_COUNT_STATS()
985 switch (xe_svm_range_size(range)) { \ in DECL_SVM_RANGE_COUNT_STATS()
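The DECL_SVM_RANGE_*_STATS macros (lines 959-985) generate per-GT counters keyed by range size: each generated helper switches on xe_svm_range_size() and updates the bucket for that size class. A small sketch of the same size-bucketing idea without the macro machinery (the bucket names and sizes are illustrative, not the driver's stat IDs):

    #include <stdio.h>

    #define SZ_4K  0x1000ul
    #define SZ_64K 0x10000ul
    #define SZ_2M  0x200000ul

    enum demo_bucket { BUCKET_4K, BUCKET_64K, BUCKET_2M, BUCKET_OTHER, BUCKET_MAX };

    static unsigned long demo_stats[BUCKET_MAX];

    /* Pick a counter bucket from the range size, then increment it. */
    static void demo_range_count_stats_incr(unsigned long range_size)
    {
        switch (range_size) {
        case SZ_4K:
            demo_stats[BUCKET_4K]++;
            break;
        case SZ_64K:
            demo_stats[BUCKET_64K]++;
            break;
        case SZ_2M:
            demo_stats[BUCKET_2M]++;
            break;
        default:
            demo_stats[BUCKET_OTHER]++;
            break;
        }
    }

    int main(void)
    {
        demo_range_count_stats_incr(SZ_64K);
        demo_range_count_stats_incr(SZ_2M);
        printf("4K=%lu 64K=%lu 2M=%lu\n",
               demo_stats[BUCKET_4K], demo_stats[BUCKET_64K], demo_stats[BUCKET_2M]);
        return 0;
    }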
1023 struct xe_svm_range *range; local
1045 range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
1047 if (IS_ERR(range))
1048 return PTR_ERR(range);
1050 xe_svm_range_fault_count_stats_incr(gt, range);
1052 if (ctx.devmem_only && !range->base.pages.flags.migrate_devmem) {
1057 if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) {
1058 xe_svm_range_valid_fault_count_stats_incr(gt, range);
1059 range_debug(range, "PAGE FAULT - VALID");
1063 range_debug(range, "PAGE FAULT");
1066 xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) {
1074 xe_svm_range_migrate_count_stats_incr(gt, range);
1075 err = xe_svm_alloc_vram(tile, range, &ctx);
1076 xe_svm_range_migrate_us_stats_incr(gt, range, migrate_start);
1106 range_debug(range, "GET PAGES");
1107 err = xe_svm_range_get_pages(vm, range, &ctx);
1115 range_debug(range, "PAGE FAULT - RETRY PAGES");
1124 range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
1128 xe_svm_range_get_pages_us_stats_incr(gt, range, get_pages_start);
1129 range_debug(range, "PAGE FAULT - BIND");
1137 fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
1143 xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
1152 xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
1155 xe_svm_range_fault_us_stats_incr(gt, range, start);
1161 range_debug(range, "PAGE FAULT - RETRY BIND");
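The fault handler (lines 1023-1161) is effectively a retry loop: find or insert the range for the faulting address, return early if it is already valid, optionally migrate it to VRAM, collect its pages (retrying when collection races with an invalidation), and finally bind it, retrying the bind as well if needed. A heavily simplified sketch of that control flow; every function below is a hypothetical stub, and errno-style codes model the retry conditions:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stubs standing in for the driver steps in the listing. */
    static bool demo_range_is_valid(void) { return false; }
    static bool demo_needs_migrate(void)  { return true; }
    static int  demo_alloc_vram(void)     { return 0; }
    static int demo_get_pages(int attempt)
    {
        /* Model a race with an invalidation on the first attempt. */
        return attempt == 0 ? -EAGAIN : 0;
    }
    static int demo_bind(void)            { return 0; }

    static int demo_handle_fault(void)
    {
        int attempt = 0;
        int err;

    retry:
        if (demo_range_is_valid())
            return 0;                       /* PAGE FAULT - VALID */

        if (demo_needs_migrate()) {
            err = demo_alloc_vram();        /* best effort; fault can proceed from SMEM */
            if (err)
                printf("VRAM allocation failed, continuing with system memory\n");
        }

        err = demo_get_pages(attempt++);
        if (err == -EAGAIN)
            goto retry;                     /* PAGE FAULT - RETRY PAGES */
        if (err)
            return err;                     /* PAGE FAULT - FAIL PAGE COLLECT */

        err = demo_bind();                  /* PAGE FAULT - BIND */
        if (err == -EAGAIN)
            goto retry;                     /* PAGE FAULT - RETRY BIND */
        return err;
    }

    int main(void)
    {
        printf("fault handled: %d\n", demo_handle_fault());
        return 0;
    }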
1213 * Check if an address range has SVM mappings.
1215 * Return: True if the address range has an SVM mapping, false otherwise
1237 struct drm_gpusvm_range *range, *__next; in xe_svm_unmap_address_range() local
1239 drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) { in xe_svm_unmap_address_range()
1240 if (start > drm_gpusvm_range_start(range) || in xe_svm_unmap_address_range()
1241 end < drm_gpusvm_range_end(range)) { in xe_svm_unmap_address_range()
1242 if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range))) in xe_svm_unmap_address_range()
1243 drm_gpusvm_range_evict(&vm->svm.gpusvm, range); in xe_svm_unmap_address_range()
1244 drm_gpusvm_range_get(range); in xe_svm_unmap_address_range()
1245 __xe_svm_garbage_collector(vm, to_xe_range(range)); in xe_svm_unmap_address_range()
1246 if (!list_empty(&to_xe_range(range)->garbage_collector_link)) { in xe_svm_unmap_address_range()
1248 list_del(&to_xe_range(range)->garbage_collector_link); in xe_svm_unmap_address_range()
1251 drm_gpusvm_range_put(range); in xe_svm_unmap_address_range()
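When unmapping [start, end), a GPU range that is only partially covered (start > range start or end < range end, lines 1240-1241) is evicted from VRAM if needed and garbage collected immediately, with a get/put pair keeping it alive across the teardown. A tiny sketch of just the partial-overlap test, with stand-in types:

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_range { unsigned long start, end; };

    /* True if [start, end) does not fully cover the GPU range, i.e. only part
     * of the range is being unmapped and it must be torn down eagerly. */
    static bool demo_partial_unmap(const struct demo_range *range,
                                   unsigned long start, unsigned long end)
    {
        return start > range->start || end < range->end;
    }

    int main(void)
    {
        struct demo_range r = { 0x10000, 0x20000 };

        printf("full cover : %d\n", demo_partial_unmap(&r, 0x10000, 0x20000));
        printf("tail only  : %d\n", demo_partial_unmap(&r, 0x18000, 0x20000));
        return 0;
    }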
1272 * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
1274 * @addr: address for which range needs to be found/inserted
1278 * This function finds or inserts a newly allocated SVM range based on the
1281 * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
1297 * xe_svm_range_get_pages() - Get pages for a SVM range
1299 * @range: Pointer to the xe SVM range structure
1302 * This function gets pages for a SVM range and ensures they are mapped for
1303 * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
1307 int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range, in xe_svm_range_get_pages() argument
1312 err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx); in xe_svm_range_get_pages()
1314 range_debug(range, "PAGE FAULT - EVICT PAGES"); in xe_svm_range_get_pages()
1315 drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base); in xe_svm_range_get_pages()
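Per the kerneldoc above, a -EOPNOTSUPP failure from drm_gpusvm_range_get_pages() causes the range to be evicted before the error is returned (lines 1312-1315), so a later retry can be satisfied from system memory. A sketch of that evict-on-unsupported pattern using stub functions and errno codes only:

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical stubs: one models the page-collection backend, the other
     * models evicting the range back to system memory. */
    static int demo_collect_pages(int simulated_err) { return simulated_err; }
    static void demo_evict_range(void) { printf("PAGE FAULT - EVICT PAGES\n"); }

    static int demo_range_get_pages(int simulated_err)
    {
        int err = demo_collect_pages(simulated_err);

        /* Per the kerneldoc: on -EOPNOTSUPP the range is evicted so a later
         * retry can fall back to system memory pages. */
        if (err == -EOPNOTSUPP)
            demo_evict_range();

        return err;
    }

    int main(void)
    {
        printf("err=%d\n", demo_range_get_pages(-EOPNOTSUPP));
        printf("err=%d\n", demo_range_get_pages(0));
        return 0;
    }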
1322 * xe_svm_ranges_zap_ptes_in_range() - Clear PTEs of SVM ranges in the input range
1324 * @start: Start of the input range
1325 * @end: End of the input range
1335 struct xe_svm_range *range; in xe_svm_ranges_zap_ptes_in_range() local
1350 range = to_xe_range(r); in xe_svm_ranges_zap_ptes_in_range()
1352 if (xe_pt_zap_ptes_range(tile, vm, range)) { in xe_svm_ranges_zap_ptes_in_range()
1361 WRITE_ONCE(range->tile_invalidated, in xe_svm_ranges_zap_ptes_in_range()
1362 range->tile_invalidated | BIT(id)); in xe_svm_ranges_zap_ptes_in_range()
1410 * xe_svm_alloc_vram() - Allocate device memory pages for range,
1413 * @range: SVM range
1418 int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range, in xe_svm_alloc_vram() argument
1423 xe_assert(tile_to_xe(tile), range->base.pages.flags.migrate_devmem); in xe_svm_alloc_vram()
1424 range_debug(range, "ALLOCATE VRAM"); in xe_svm_alloc_vram()
1427 return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range), in xe_svm_alloc_vram()
1428 xe_svm_range_end(range), in xe_svm_alloc_vram()
1429 range->base.gpusvm->mm, in xe_svm_alloc_vram()
1486 vr->pagemap.range.start = res->start; in xe_devm_add()
1487 vr->pagemap.range.end = res->end; in xe_devm_add()
1511 struct xe_svm_range *range, in xe_svm_alloc_vram() argument