Lines Matching refs:range
22 static bool xe_svm_range_in_vram(struct xe_svm_range *range)
25 * Advisory-only check of whether the range is currently backed by VRAM
31 .__flags = READ_ONCE(range->base.pages.flags.__flags),
37 static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
40 return xe_svm_range_in_vram(range) && range->tile_present;
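
The pair of helpers above is explicitly advisory: the page flags are sampled once (READ_ONCE() at line 31) and may already be stale when the caller acts on them. A minimal standalone model of that snapshot-then-test pattern, using a hypothetical flag bit and stand-in types rather than the driver's real layout:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for the driver's per-range state. */
#define FAKE_FLAG_HAS_DEVMEM	(1u << 0)

struct fake_range {
	uint32_t flags;		/* updated concurrently elsewhere */
	uint8_t tile_present;	/* bitmask of tiles with a GPU binding */
};

/* Advisory only: one volatile read of the flags word, so the answer
 * can be stale the moment it is returned. */
static bool fake_range_in_vram(const struct fake_range *r)
{
	uint32_t snap = *(const volatile uint32_t *)&r->flags;

	return snap & FAKE_FLAG_HAS_DEVMEM;
}

/* Backed by VRAM *and* bound on at least one tile. */
static bool fake_range_has_vram_binding(const struct fake_range *r)
{
	return fake_range_in_vram(r) && r->tile_present;
}
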
65 void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
67 range_debug(range, operation);
73 struct xe_svm_range *range;
75 range = kzalloc(sizeof(*range), GFP_KERNEL);
76 if (!range)
79 INIT_LIST_HEAD(&range->garbage_collector_link);
82 return &range->base;
85 static void xe_svm_range_free(struct drm_gpusvm_range *range)
87 xe_vm_put(range_to_vm(range));
88 kfree(range);
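
The alloc/free callbacks above follow the usual embed-the-core-object pattern: the xe range wraps a drm_gpusvm_range and hands the embedded base back to the GPU SVM core, and the free side also drops the VM reference. A hedged kernel-style sketch of just the allocation and list-node setup, with a stand-in struct rather than the real xe_svm_range:

#include <linux/list.h>
#include <linux/slab.h>

struct fake_svm_range {
	struct list_head gc_link;	/* garbage collector membership */
	/* the real object also embeds struct drm_gpusvm_range base */
};

static struct fake_svm_range *fake_range_alloc(void)
{
	struct fake_svm_range *range = kzalloc(sizeof(*range), GFP_KERNEL);

	if (!range)
		return NULL;

	/* An empty (self-linked) node later doubles as "not queued". */
	INIT_LIST_HEAD(&range->gc_link);
	return range;
}

static void fake_range_free(struct fake_svm_range *range)
{
	kfree(range);
}
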
92 xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
97 range_debug(range, "GARBAGE COLLECTOR ADD");
99 drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
102 if (list_empty(&range->garbage_collector_link))
103 list_add_tail(&range->garbage_collector_link,
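
The add path above queues a range for deferred teardown at most once: list_empty() on the range's own link node serves as an "already queued" test. A small kernel-style sketch of that idiom with a generic item type; the lock protecting the real list is not visible in these matches and is assumed:

#include <linux/list.h>

struct gc_item {
	struct list_head link;	/* must start out INIT_LIST_HEAD()-ed */
};

/*
 * Queue @it at most once: a node that is already on a list is not
 * re-added. For the list_empty() test to stay meaningful, removal must
 * re-initialize the node (list_del_init()) or the item must be freed.
 */
static void gc_queue_once(struct list_head *work_list, struct gc_item *it)
{
	if (list_empty(&it->link))
		list_add_tail(&it->link, work_list);
}
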
120 struct xe_svm_range *range = to_xe_range(r);
128 range_debug(range, "NOTIFIER");
131 if (range->base.pages.flags.unmapped || !range->tile_present)
134 range_debug(range, "NOTIFIER - EXECUTE");
136 /* Adjust invalidation to range boundaries */
137 *adj_start = min(xe_svm_range_start(range), mmu_range->start);
138 *adj_end = max(xe_svm_range_end(range), mmu_range->end);
146 if (xe_pt_zap_ptes_range(tile, vm, range)) {
151 WRITE_ONCE(range->tile_invalidated,
152 range->tile_invalidated | BIT(id));
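
The notifier body above does two pieces of bookkeeping: it widens the invalidation window so it covers whole SVM ranges, and it records which tiles actually had PTEs zapped. A standalone model of just that arithmetic, with stand-in types:

#include <stdint.h>

struct fake_inval_range {
	uint64_t start, end;		/* SVM range boundaries */
	uint8_t tile_invalidated;	/* one bit per tile */
};

/* Mirror of the min()/max() adjustment: grow [*adj_start, *adj_end)
 * until it covers the whole range. */
static void widen_to_range(const struct fake_inval_range *r,
			   uint64_t *adj_start, uint64_t *adj_end)
{
	if (r->start < *adj_start)
		*adj_start = r->start;
	if (r->end > *adj_end)
		*adj_end = r->end;
}

/* Record that tile @id had its PTEs zapped for this range. */
static void mark_tile_invalidated(struct fake_inval_range *r, unsigned int id)
{
	r->tile_invalidated |= (uint8_t)(1u << id);
}
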
235 * invalidation is not required. Could walk range list twice to figure
270 struct xe_svm_range *range)
274 range_debug(range, "GARBAGE COLLECTOR");
277 fence = xe_vm_range_unbind(vm, range);
283 drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
339 struct xe_svm_range *range;
351 range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
352 typeof(*range),
354 if (!range)
357 range_start = xe_svm_range_start(range);
358 range_end = xe_svm_range_end(range);
360 list_del(&range->garbage_collector_link);
363 err = __xe_svm_garbage_collector(vm, range);
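
The worker above drains its queue one entry at a time: take the first queued range (or stop when the list is empty), unlink it, then run the actual teardown outside the walk. A generic kernel-style sketch of that pop-first pattern; locking around the list is assumed and not shown:

#include <linux/list.h>

struct gc_work_item {
	struct list_head link;
};

/* Pop and unlink the first queued item, or return NULL when the
 * queue is empty; the caller processes the item afterwards. */
static struct gc_work_item *gc_pop_next(struct list_head *work_list)
{
	struct gc_work_item *it =
		list_first_entry_or_null(work_list, struct gc_work_item, link);

	if (it)
		list_del(&it->link);

	return it;
}
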
797 static bool xe_svm_range_is_valid(struct xe_svm_range *range,
801 return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
802 range->tile_invalidated) &&
803 (!devmem_only || xe_svm_range_in_vram(range)));
806 /** xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
808 * @range: Pointer to the SVM range structure
810 * The xe_svm_range_migrate_to_smem() function checks whether the range has pages in VRAM
813 void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
815 if (xe_svm_range_in_vram(range))
816 drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
820 * xe_svm_range_validate() - Check if the SVM range is valid
822 * @range: Pointer to the SVM range structure
824 * @devmem_preferred: if true, the range needs to be in devmem
826 * The xe_svm_range_validate() function checks if a range is
829 * Return: true if the range is valid, false otherwise
832 struct xe_svm_range *range,
839 ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
840 (devmem_preferred == range->base.pages.flags.has_devmem_pages);
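
The check at line 839 reduces to a pure bitmask test: every tile requested in tile_mask must be present and not invalidated, and the devmem preference must match whether the range actually has devmem pages. A standalone model of that test:

#include <stdbool.h>
#include <stdint.h>

/* True when every tile in @tile_mask has a present, not-yet-invalidated
 * GPU mapping and the devmem preference matches the actual backing. */
static bool range_valid(uint8_t tile_present, uint8_t tile_invalidated,
			uint8_t tile_mask, bool has_devmem_pages,
			bool devmem_preferred)
{
	if ((tile_present & ~tile_invalidated & tile_mask) != tile_mask)
		return false;

	return devmem_preferred == has_devmem_pages;
}
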
856 * range [start, end] in the given VM. It adjusts the range based on the
859 * Return: The starting address of the VMA within the range,
950 * xe_svm_range_needs_migrate_to_vram() - Check if an SVM range needs migration to VRAM
951 * @range: SVM range for which migration needs to be decided
952 * @vma: VMA which contains the range
953 * @preferred_region_is_vram: true if the preferred region for the range is VRAM
955 * Return: True if the range needs migration and migration is supported, false otherwise
957 bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
960 struct xe_vm *vm = range_to_vm(&range->base);
961 u64 range_size = xe_svm_range_size(range);
963 if (!range->base.pages.flags.migrate_devmem || !preferred_region_is_vram)
968 if (xe_svm_range_in_vram(range)) {
974 drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
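
Only part of the decision body is visible in these matches, but its shape is clear: bail out when devmem migration is impossible or not preferred, when the range is already in VRAM, or when the platform cannot migrate a 4K range. A hedged standalone model; the already-in-VRAM and 4K branches are assumptions inferred from the surrounding lines:

#include <stdbool.h>

#define FAKE_SZ_4K	4096ul

static bool needs_migrate_to_vram(bool migrate_devmem, bool preferred_is_vram,
				  bool already_in_vram, bool platform_supports_4k,
				  unsigned long range_size)
{
	/* Migration to devmem must be both possible and preferred. */
	if (!migrate_devmem || !preferred_is_vram)
		return false;

	/* Assumed: a range already in VRAM has nothing left to migrate. */
	if (already_in_vram)
		return false;

	/* Assumed from the drm_dbg above: some platforms cannot migrate
	 * a bare 4K range. */
	if (range_size == FAKE_SZ_4K && !platform_supports_4k)
		return false;

	return true;
}
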
983 struct xe_svm_range *range) \
985 switch (xe_svm_range_size(range)) { \
1004 struct xe_svm_range *range, \
1009 switch (xe_svm_range_size(range)) { \
1047 struct xe_svm_range *range;
1069 range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
1071 if (IS_ERR(range))
1072 return PTR_ERR(range);
1074 xe_svm_range_fault_count_stats_incr(gt, range);
1076 if (ctx.devmem_only && !range->base.pages.flags.migrate_devmem) {
1081 if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) {
1082 xe_svm_range_valid_fault_count_stats_incr(gt, range);
1083 range_debug(range, "PAGE FAULT - VALID");
1087 range_debug(range, "PAGE FAULT");
1090 xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) {
1098 xe_svm_range_migrate_count_stats_incr(gt, range);
1099 err = xe_svm_alloc_vram(tile, range, &ctx);
1100 xe_svm_range_migrate_us_stats_incr(gt, range, migrate_start);
1130 range_debug(range, "GET PAGES");
1131 err = xe_svm_range_get_pages(vm, range, &ctx);
1139 range_debug(range, "PAGE FAULT - RETRY PAGES");
1148 range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
1152 xe_svm_range_get_pages_us_stats_incr(gt, range, get_pages_start);
1153 range_debug(range, "PAGE FAULT - BIND");
1161 fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
1167 xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
1176 xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
1179 xe_svm_range_fault_us_stats_incr(gt, range, start);
1185 range_debug(range, "PAGE FAULT - RETRY BIND");
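
Pulling the matched lines together, the fault path is: look up or create the range for the faulting address, early-out if the mapping is already valid for this tile, optionally migrate the backing to VRAM, collect and DMA-map the pages, then rebind the range in the GPU page tables. A condensed, hedged sketch of that flow using only the calls visible above; headers are omitted, and the error/retry handling, stats counters, and exact ctx type are assumptions:

static int fault_flow_sketch(struct xe_vm *vm, struct xe_tile *tile,
			     struct xe_vma *vma, u64 fault_addr,
			     struct drm_gpusvm_ctx *ctx)
{
	struct xe_svm_range *range;
	struct dma_fence *fence;
	int err;

	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, ctx);
	if (IS_ERR(range))
		return PTR_ERR(range);

	/* Fast path: already present and not invalidated on this tile. */
	if (xe_svm_range_is_valid(range, tile, ctx->devmem_only))
		return 0;

	/* Optionally move the backing store into tile-local VRAM first. */
	if (xe_svm_range_needs_migrate_to_vram(range, vma, ctx->devmem_only)) {
		err = xe_svm_alloc_vram(tile, range, ctx);
		if (err)
			return err;	/* the real path retries/falls back */
	}

	/* Collect the CPU pages and map them for DMA. */
	err = xe_svm_range_get_pages(vm, range, ctx);
	if (err)
		return err;		/* the real path may retry */

	/* Finally (re)bind the range in this tile's page tables. */
	fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return 0;
}
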
1237 * Check if an address range has SVM mappings.
1239 * Return: True if the address range has an SVM mapping, false otherwise
1261 struct drm_gpusvm_range *range, *__next;
1263 drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
1264 if (start > drm_gpusvm_range_start(range) ||
1265 end < drm_gpusvm_range_end(range)) {
1266 if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
1267 drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
1268 drm_gpusvm_range_get(range);
1269 __xe_svm_garbage_collector(vm, to_xe_range(range));
1270 if (!list_empty(&to_xe_range(range)->garbage_collector_link)) {
1272 list_del(&to_xe_range(range)->garbage_collector_link);
1275 drm_gpusvm_range_put(range);
1296 * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
1298 * @addr: address for which range needs to be found/inserted
1302 * This function finds or inserts a newly allocated SVM range based on the
1305 * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
1321 * xe_svm_range_get_pages() - Get pages for an SVM range
1323 * @range: Pointer to the xe SVM range structure
1326 * This function gets pages for an SVM range and ensures they are mapped for
1327 * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
1331 int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
1336 err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
1338 range_debug(range, "PAGE FAULT - EVICT PAGES");
1339 drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
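
The error handling visible here is narrow but specific: a -EOPNOTSUPP failure from drm_gpusvm_range_get_pages() causes the range to be evicted before the error is propagated. A hedged sketch of just that shape; headers are omitted and the caller's broader retry policy is not shown in these matches:

static int get_pages_sketch(struct xe_vm *vm, struct xe_svm_range *range,
			    struct drm_gpusvm_ctx *ctx)
{
	int err;

	err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
	if (err == -EOPNOTSUPP) {
		/* Backing pages cannot be used as-is: drop them. */
		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
	}

	return err;
}
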
1346 * xe_svm_ranges_zap_ptes_in_range() - Clear PTEs of SVM ranges in the input range
1348 * @start: Start of the input range
1349 * @end: End of the input range
1359 struct xe_svm_range *range;
1374 range = to_xe_range(r);
1376 if (xe_pt_zap_ptes_range(tile, vm, range)) {
1385 WRITE_ONCE(range->tile_invalidated,
1386 range->tile_invalidated | BIT(id));
1434 * xe_svm_alloc_vram() - Allocate device memory pages for range,
1437 * @range: SVM range
1442 int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
1447 xe_assert(tile_to_xe(tile), range->base.pages.flags.migrate_devmem);
1448 range_debug(range, "ALLOCATE VRAM");
1451 return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
1452 xe_svm_range_end(range),
1453 range->base.gpusvm->mm,
1510 vr->pagemap.range.start = res->start;
1511 vr->pagemap.range.end = res->end;
1535 struct xe_svm_range *range,