Lines Matching +full:r +full:- +full:tile

1 // SPDX-License-Identifier: MIT
31 .__flags = READ_ONCE(range->base.pages.flags.__flags), in xe_svm_range_in_vram()
40 return xe_svm_range_in_vram(range) && range->tile_present; in xe_svm_range_has_vram_binding()
48 static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r) in range_to_vm() argument
50 return gpusvm_to_vm(r->gpusvm); in range_to_vm()
54 vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
57 (operation__), range_to_vm(&(r__)->base)->usm.asid, \
58 (r__)->base.gpusvm, \
61 (r__)->base.pages.notifier_seq, \
79 INIT_LIST_HEAD(&range->garbage_collector_link); in xe_svm_range_alloc()
82 return &range->base; in xe_svm_range_alloc()
95 struct xe_device *xe = vm->xe; in xe_svm_garbage_collector_add_range()
99 drm_gpusvm_range_set_unmapped(&range->base, mmu_range); in xe_svm_garbage_collector_add_range()
101 spin_lock(&vm->svm.garbage_collector.lock); in xe_svm_garbage_collector_add_range()
102 if (list_empty(&range->garbage_collector_link)) in xe_svm_garbage_collector_add_range()
103 list_add_tail(&range->garbage_collector_link, in xe_svm_garbage_collector_add_range()
104 &vm->svm.garbage_collector.range_list); in xe_svm_garbage_collector_add_range()
105 spin_unlock(&vm->svm.garbage_collector.lock); in xe_svm_garbage_collector_add_range()
107 queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq, in xe_svm_garbage_collector_add_range()
108 &vm->svm.garbage_collector.work); in xe_svm_garbage_collector_add_range()
117 xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r, in xe_svm_range_notifier_event_begin() argument
121 struct xe_svm_range *range = to_xe_range(r); in xe_svm_range_notifier_event_begin()
122 struct xe_device *xe = vm->xe; in xe_svm_range_notifier_event_begin()
123 struct xe_tile *tile; in xe_svm_range_notifier_event_begin() local
132 if (range->base.pages.flags.unmapped || !range->tile_present) in xe_svm_range_notifier_event_begin()
135 range_debug(range, "NOTIFIER - EXECUTE"); in xe_svm_range_notifier_event_begin()
138 *adj_start = min(xe_svm_range_start(range), mmu_range->start); in xe_svm_range_notifier_event_begin()
139 *adj_end = max(xe_svm_range_end(range), mmu_range->end); in xe_svm_range_notifier_event_begin()
146 for_each_tile(tile, xe, id) in xe_svm_range_notifier_event_begin()
147 if (xe_pt_zap_ptes_range(tile, vm, range)) { in xe_svm_range_notifier_event_begin()
152 WRITE_ONCE(range->tile_invalidated, in xe_svm_range_notifier_event_begin()
153 range->tile_invalidated | BIT(id)); in xe_svm_range_notifier_event_begin()
156 xe_svm_tlb_inval_count_stats_incr(tile->primary_gt); in xe_svm_range_notifier_event_begin()
157 if (tile->media_gt) in xe_svm_range_notifier_event_begin()
158 xe_svm_tlb_inval_count_stats_incr(tile->media_gt); in xe_svm_range_notifier_event_begin()
167 xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r, in xe_svm_range_notifier_event_end() argument
174 drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx); in xe_svm_range_notifier_event_end()
175 if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP) in xe_svm_range_notifier_event_end()
176 xe_svm_garbage_collector_add_range(vm, to_xe_range(r), in xe_svm_range_notifier_event_end()
203 struct xe_device *xe = vm->xe; in xe_svm_invalidate()
204 struct drm_gpusvm_range *r, *first; in xe_svm_invalidate() local
205 struct xe_tile *tile; in xe_svm_invalidate() local
207 u64 adj_start = mmu_range->start, adj_end = mmu_range->end; in xe_svm_invalidate()
213 vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm, in xe_svm_invalidate()
215 vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq, in xe_svm_invalidate()
216 mmu_range->start, mmu_range->end, mmu_range->event); in xe_svm_invalidate()
244 r = first; in xe_svm_invalidate()
245 drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) in xe_svm_invalidate()
246 tile_mask |= xe_svm_range_notifier_event_begin(vm, r, mmu_range, in xe_svm_invalidate()
258 r = first; in xe_svm_invalidate()
259 drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) in xe_svm_invalidate()
260 xe_svm_range_notifier_event_end(vm, r, mmu_range); in xe_svm_invalidate()
261 for_each_tile(tile, xe, id) { in xe_svm_invalidate()
263 xe_svm_tlb_inval_us_stats_incr(tile->primary_gt, start); in xe_svm_invalidate()
264 if (tile->media_gt) in xe_svm_invalidate()
265 xe_svm_tlb_inval_us_stats_incr(tile->media_gt, start); in xe_svm_invalidate()
284 drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base); in __xe_svm_garbage_collector()
303 return -EINVAL; in xe_svm_range_set_default_attr()
305 if (!(vma->gpuva.flags & XE_VMA_MADV_AUTORESET)) { in xe_svm_range_set_default_attr()
306 drm_dbg(&vm->xe->drm, "Skipping madvise reset for vma.\n"); in xe_svm_range_set_default_attr()
313 vm_dbg(&vm->xe->drm, "Existing VMA start=0x%016llx, vma_end=0x%016llx", in xe_svm_range_set_default_attr()
317 default_attr.pat_index = vma->attr.default_pat_index; in xe_svm_range_set_default_attr()
318 default_attr.default_pat_index = vma->attr.default_pat_index; in xe_svm_range_set_default_attr()
319 vma->attr = default_attr; in xe_svm_range_set_default_attr()
321 vm_dbg(&vm->xe->drm, "Split VMA start=0x%016llx, vma_end=0x%016llx", in xe_svm_range_set_default_attr()
323 err = xe_vm_alloc_cpu_addr_mirror_vma(vm, range_start, range_end - range_start); in xe_svm_range_set_default_attr()
325 drm_warn(&vm->xe->drm, "VMA SPLIT failed: %pe\n", ERR_PTR(err)); in xe_svm_range_set_default_attr()
335 return -EAGAIN; in xe_svm_range_set_default_attr()
345 lockdep_assert_held_write(&vm->lock); in xe_svm_garbage_collector()
348 return -ENOENT; in xe_svm_garbage_collector()
351 spin_lock(&vm->svm.garbage_collector.lock); in xe_svm_garbage_collector()
352 range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list, in xe_svm_garbage_collector()
361 list_del(&range->garbage_collector_link); in xe_svm_garbage_collector()
362 spin_unlock(&vm->svm.garbage_collector.lock); in xe_svm_garbage_collector()
366 drm_warn(&vm->xe->drm, in xe_svm_garbage_collector()
375 if (err == -EAGAIN) in xe_svm_garbage_collector()
376 ret = -EAGAIN; in xe_svm_garbage_collector()
381 spin_unlock(&vm->svm.garbage_collector.lock); in xe_svm_garbage_collector()
391 down_write(&vm->lock); in xe_svm_garbage_collector_work_func()
393 up_write(&vm->lock); in xe_svm_garbage_collector_work_func()
410 xe_assert(vr->xe, is_device_private_page(page)); in xe_vram_region_page_to_dpa()
411 xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= vr->hpa_base); in xe_vram_region_page_to_dpa()
413 offset = (pfn << PAGE_SHIFT) - vr->hpa_base; in xe_vram_region_page_to_dpa()
414 dpa = vr->dpa_base + offset; in xe_vram_region_page_to_dpa()
509 chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE); in xe_svm_copy()
518 gt = xe_migrate_exec_queue(vr->migrate)->gt; in xe_svm_copy()
519 xe = vr->xe; in xe_svm_copy()
535 match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr; in xe_svm_copy()
537 xe_assert(vr->xe, match); in xe_svm_copy()
540 i += NR_PAGES(pagemap_addr[i].order) - 1; in xe_svm_copy()
541 chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE); in xe_svm_copy()
547 * Mismatched physical address, 8M copy chunk, or last page - in xe_svm_copy()
559 (i - pos + incr) * in xe_svm_copy()
562 vm_dbg(&xe->drm, in xe_svm_copy()
563 "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld", in xe_svm_copy()
565 (u64)pagemap_addr[pos].addr, i - pos + incr); in xe_svm_copy()
566 __fence = xe_migrate_from_vram(vr->migrate, in xe_svm_copy()
567 i - pos + incr, in xe_svm_copy()
571 vm_dbg(&xe->drm, in xe_svm_copy()
572 "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld", in xe_svm_copy()
574 i - pos + incr); in xe_svm_copy()
575 __fence = xe_migrate_to_vram(vr->migrate, in xe_svm_copy()
576 i - pos + incr, in xe_svm_copy()
602 vm_dbg(&xe->drm, in xe_svm_copy()
603 "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d", in xe_svm_copy()
605 __fence = xe_migrate_from_vram(vr->migrate, 1, in xe_svm_copy()
609 vm_dbg(&xe->drm, in xe_svm_copy()
610 "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d", in xe_svm_copy()
612 __fence = xe_migrate_to_vram(vr->migrate, 1, in xe_svm_copy()
677 return PHYS_PFN(offset + vr->hpa_base); in block_offset_to_pfn()
682 return &vram->ttm.mm; in vram_to_buddy()
689 struct ttm_resource *res = bo->ttm.resource; in xe_svm_populate_devmem_pfn()
690 struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks; in xe_svm_populate_devmem_pfn()
695 struct xe_vram_region *vr = block->private; in xe_svm_populate_devmem_pfn()
729 * xe_svm_init() - SVM initialize
740 if (vm->flags & XE_VM_FLAG_FAULT_MODE) { in xe_svm_init()
741 spin_lock_init(&vm->svm.garbage_collector.lock); in xe_svm_init()
742 INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list); in xe_svm_init()
743 INIT_WORK(&vm->svm.garbage_collector.work, in xe_svm_init()
746 err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm, in xe_svm_init()
747 current->mm, 0, vm->size, in xe_svm_init()
751 drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock); in xe_svm_init()
753 err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)", in xe_svm_init()
754 &vm->xe->drm, NULL, 0, 0, 0, NULL, in xe_svm_init()
762 * xe_svm_close() - SVM close
769 xe_assert(vm->xe, xe_vm_is_closed(vm)); in xe_svm_close()
770 flush_work(&vm->svm.garbage_collector.work); in xe_svm_close()
774 * xe_svm_fini() - SVM finalize
781 xe_assert(vm->xe, xe_vm_is_closed(vm)); in xe_svm_fini()
783 drm_gpusvm_fini(&vm->svm.gpusvm); in xe_svm_fini()
787 struct xe_tile *tile, in xe_svm_range_is_valid() argument
790 return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present, in xe_svm_range_is_valid()
791 range->tile_invalidated) && in xe_svm_range_is_valid()
795 /** xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
805 drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base); in xe_svm_range_migrate_to_smem()
809 * xe_svm_range_validate() - Check if the SVM range is valid
828 ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask && in xe_svm_range_validate()
829 (devmem_preferred == range->base.pages.flags.has_devmem_pages); in xe_svm_range_validate()
837 * xe_svm_find_vma_start - Find start of CPU VMA
853 return drm_gpusvm_find_vma_start(&vm->svm.gpusvm, in xe_svm_find_vma_start()
865 struct xe_device *xe = vr->xe; in xe_drm_pagemap_populate_mm()
866 struct device *dev = xe->drm.dev; in xe_drm_pagemap_populate_mm()
874 if (!drm_dev_enter(&xe->drm, &idx)) in xe_drm_pagemap_populate_mm()
875 return -ENODEV; in xe_drm_pagemap_populate_mm()
879 xe_validation_guard(&vctx, &xe->val, &exec, (struct xe_val_flags) {}, err) { in xe_drm_pagemap_populate_mm()
880 bo = xe_bo_create_locked(xe, NULL, NULL, end - start, in xe_drm_pagemap_populate_mm()
891 drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm, in xe_drm_pagemap_populate_mm()
892 &dpagemap_devmem_ops, dpagemap, end - start); in xe_drm_pagemap_populate_mm()
894 blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks; in xe_drm_pagemap_populate_mm()
896 block->private = vr; in xe_drm_pagemap_populate_mm()
902 err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm, in xe_drm_pagemap_populate_mm()
906 xe_svm_devmem_release(&bo->devmem_allocation); in xe_drm_pagemap_populate_mm()
919 if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) in supports_4K_migration()
926 * xe_svm_range_needs_migrate_to_vram() - SVM range needs migrate to VRAM or not
936 struct xe_vm *vm = range_to_vm(&range->base); in xe_svm_range_needs_migrate_to_vram()
939 if (!range->base.pages.flags.migrate_devmem || !preferred_region_is_vram) in xe_svm_range_needs_migrate_to_vram()
942 xe_assert(vm->xe, IS_DGFX(vm->xe)); in xe_svm_range_needs_migrate_to_vram()
945 drm_info(&vm->xe->drm, "Range is already in VRAM\n"); in xe_svm_range_needs_migrate_to_vram()
949 if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) { in xe_svm_range_needs_migrate_to_vram()
950 drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n"); in xe_svm_range_needs_migrate_to_vram()
1010 int devmem_possible = IS_DGFX(vm->xe) &&
1018 vm->xe->atomic_svm_timeslice_ms : 0,
1019 .device_private_page_owner = xe_svm_devm_owner(vm->xe),
1026 struct xe_tile *tile = gt_to_tile(gt); local
1031 lockdep_assert_held_write(&vm->lock);
1032 xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
1042 dpagemap = xe_vma_resolve_pagemap(vma, tile);
1052 if (ctx.devmem_only && !range->base.pages.flags.migrate_devmem) {
1053 err = -EACCES;
1057 if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) {
1059 range_debug(range, "PAGE FAULT - VALID");
1065 if (--migrate_try_count >= 0 &&
1069 /* TODO : For multi-device dpagemap will be used to find the
1070 * remote tile and remote device. Will need to modify
1071 * xe_svm_alloc_vram to use dpagemap for future multi-device
1075 err = xe_svm_alloc_vram(tile, range, &ctx);
1080 drm_dbg(&vm->xe->drm,
1082 vm->usm.asid, ERR_PTR(err));
1085 * In the devmem-only case, mixed mappings may
1095 drm_err(&vm->xe->drm,
1097 vm->usm.asid, ERR_PTR(err));
1109 if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
1112 drm_dbg(&vm->xe->drm,
1114 vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1115 range_debug(range, "PAGE FAULT - RETRY PAGES");
1118 drm_err(&vm->xe->drm,
1120 vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1124 range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
1129 range_debug(range, "PAGE FAULT - BIND");
1132 xe_validation_guard(&vctx, &vm->xe->val, &exec, (struct xe_val_flags) {}, err) {
1137 fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
1159 if (err == -EAGAIN) {
1161 range_debug(range, "PAGE FAULT - RETRY BIND");
1169 * xe_svm_handle_pagefault() - SVM handle page fault
1187 need_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic); in xe_svm_handle_pagefault()
1193 if (ret == -EAGAIN) { in xe_svm_handle_pagefault()
1195 * Retry once on -EAGAIN to re-lookup the VMA, as the original VMA in xe_svm_handle_pagefault()
1200 return -EINVAL; in xe_svm_handle_pagefault()
1208 * xe_svm_has_mapping() - SVM has mappings
1219 return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end); in xe_svm_has_mapping()
1223 * xe_svm_unmap_address_range - UNMAP SVM mappings and ranges
1234 lockdep_assert_held_write(&vm->lock); in xe_svm_unmap_address_range()
1236 drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) { in xe_svm_unmap_address_range()
1242 if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range))) in xe_svm_unmap_address_range()
1243 drm_gpusvm_range_evict(&vm->svm.gpusvm, range); in xe_svm_unmap_address_range()
1246 if (!list_empty(&to_xe_range(range)->garbage_collector_link)) { in xe_svm_unmap_address_range()
1247 spin_lock(&vm->svm.garbage_collector.lock); in xe_svm_unmap_address_range()
1248 list_del(&to_xe_range(range)->garbage_collector_link); in xe_svm_unmap_address_range()
1249 spin_unlock(&vm->svm.garbage_collector.lock); in xe_svm_unmap_address_range()
1258 * xe_svm_bo_evict() - SVM evict BO to system memory
1268 return drm_pagemap_evict_to_ram(&bo->devmem_allocation); in xe_svm_bo_evict()
1272 * xe_svm_range_find_or_insert - Find or insert GPU SVM range
1286 struct drm_gpusvm_range *r; in xe_svm_range_find_or_insert() local
1288 r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)), in xe_svm_range_find_or_insert()
1290 if (IS_ERR(r)) in xe_svm_range_find_or_insert()
1291 return ERR_CAST(r); in xe_svm_range_find_or_insert()
1293 return to_xe_range(r); in xe_svm_range_find_or_insert()
1297 * xe_svm_range_get_pages() - Get pages for a SVM range
1303 * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
1312 err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx); in xe_svm_range_get_pages()
1313 if (err == -EOPNOTSUPP) { in xe_svm_range_get_pages()
1314 range_debug(range, "PAGE FAULT - EVICT PAGES"); in xe_svm_range_get_pages()
1315 drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base); in xe_svm_range_get_pages()
1322 * xe_svm_ranges_zap_ptes_in_range - clear ptes of svm ranges in input range
1337 struct xe_tile *tile; in xe_svm_ranges_zap_ptes_in_range() local
1341 lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) && in xe_svm_ranges_zap_ptes_in_range()
1342 lockdep_is_held_type(&vm->lock, 0)); in xe_svm_ranges_zap_ptes_in_range()
1344 drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) { in xe_svm_ranges_zap_ptes_in_range()
1345 struct drm_gpusvm_range *r = NULL; in xe_svm_ranges_zap_ptes_in_range() local
1349 drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) { in xe_svm_ranges_zap_ptes_in_range()
1350 range = to_xe_range(r); in xe_svm_ranges_zap_ptes_in_range()
1351 for_each_tile(tile, vm->xe, id) { in xe_svm_ranges_zap_ptes_in_range()
1352 if (xe_pt_zap_ptes_range(tile, vm, range)) { in xe_svm_ranges_zap_ptes_in_range()
1361 WRITE_ONCE(range->tile_invalidated, in xe_svm_ranges_zap_ptes_in_range()
1362 range->tile_invalidated | BIT(id)); in xe_svm_ranges_zap_ptes_in_range()
1373 static struct drm_pagemap *tile_local_pagemap(struct xe_tile *tile) in tile_local_pagemap() argument
1375 return &tile->mem.vram->dpagemap; in tile_local_pagemap()
1379 * xe_vma_resolve_pagemap - Resolve the appropriate DRM pagemap for a VMA
1381 * @tile: Pointer to the xe_tile structure used as fallback for VRAM mapping
1388 * GT identifier, it returns the VRAM pagemap associated with the tile.
1390 * Future support for multi-device configurations may use drm_pagemap_from_fd()
1395 struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile) in xe_vma_resolve_pagemap() argument
1397 s32 fd = (s32)vma->attr.preferred_loc.devmem_fd; in xe_vma_resolve_pagemap()
1403 return IS_DGFX(tile_to_xe(tile)) ? tile_local_pagemap(tile) : NULL; in xe_vma_resolve_pagemap()
1405 /* TODO: Support multi-device with drm_pagemap_from_fd(fd) */ in xe_vma_resolve_pagemap()
1410 * xe_svm_alloc_vram() - Allocate device memory pages for range,
1412 * @tile: tile to allocate vram from
1418 int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range, in xe_svm_alloc_vram() argument
1423 xe_assert(tile_to_xe(tile), range->base.pages.flags.migrate_devmem); in xe_svm_alloc_vram()
1426 dpagemap = tile_local_pagemap(tile); in xe_svm_alloc_vram()
1429 range->base.gpusvm->mm, in xe_svm_alloc_vram()
1430 ctx->timeslice_ms); in xe_svm_alloc_vram()
1440 struct device *pgmap_dev = dpagemap->dev; in xe_drm_pagemap_device_map()
1462 * @tile: tile that the memory region belongs to
1470 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr) in xe_devm_add() argument
1472 struct xe_device *xe = tile_to_xe(tile); in xe_devm_add()
1473 struct device *dev = &to_pci_dev(xe->drm.dev)->dev; in xe_devm_add()
1479 vr->usable_size); in xe_devm_add()
1485 vr->pagemap.type = MEMORY_DEVICE_PRIVATE; in xe_devm_add()
1486 vr->pagemap.range.start = res->start; in xe_devm_add()
1487 vr->pagemap.range.end = res->end; in xe_devm_add()
1488 vr->pagemap.nr_range = 1; in xe_devm_add()
1489 vr->pagemap.ops = drm_pagemap_pagemap_ops_get(); in xe_devm_add()
1490 vr->pagemap.owner = xe_svm_devm_owner(xe); in xe_devm_add()
1491 addr = devm_memremap_pages(dev, &vr->pagemap); in xe_devm_add()
1493 vr->dpagemap.dev = dev; in xe_devm_add()
1494 vr->dpagemap.ops = &xe_drm_pagemap_ops; in xe_devm_add()
1497 devm_release_mem_region(dev, res->start, resource_size(res)); in xe_devm_add()
1499 drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n", in xe_devm_add()
1500 tile->id, ERR_PTR(ret)); in xe_devm_add()
1503 vr->hpa_base = res->start; in xe_devm_add()
1505 drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n", in xe_devm_add()
1506 tile->id, vr->io_start, vr->io_start + vr->usable_size, res); in xe_devm_add()
1510 int xe_svm_alloc_vram(struct xe_tile *tile, in xe_svm_alloc_vram() argument
1514 return -EOPNOTSUPP; in xe_svm_alloc_vram()
1517 int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr) in xe_devm_add() argument
1522 struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile) in xe_vma_resolve_pagemap() argument
1529 * xe_svm_flush() - SVM flush
1537 flush_work(&vm->svm.garbage_collector.work); in xe_svm_flush()
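
Two idioms recur throughout the matches above and are easier to read in isolation. The first is the deferred garbage collection seen in xe_svm_garbage_collector_add_range() and xe_svm_garbage_collector(): notifier-side code parks unmapped ranges on a spinlock-protected list and queues a work item, and the worker later drains that list under the heavier VM lock. Below is a minimal, self-contained sketch of that idiom only; every name (demo_vm, demo_range, demo_gc_*) is hypothetical, and system_wq stands in for the driver's dedicated page-fault workqueue, so treat it as an illustration of the pattern rather than the Xe implementation.

/*
 * Sketch of the deferred-cleanup idiom: producers park stale objects on a
 * spinlock-protected list and kick a work item; the worker drains the list
 * under the sleeping VM lock. All demo_* names are hypothetical.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/rwsem.h>
#include <linux/slab.h>

struct demo_range {
	/* Must be initialized with INIT_LIST_HEAD() when the range is
	 * created (compare xe_svm_range_alloc() above), so the
	 * list_empty() check below can detect "not queued yet".
	 */
	struct list_head gc_link;
	/* ... range state ... */
};

struct demo_vm {
	struct rw_semaphore lock;	/* plays the role of vm->lock */
	spinlock_t gc_lock;		/* protects gc_list only */
	struct list_head gc_list;
	struct work_struct gc_work;
};

/* Producer side: safe to call from a notifier-like, non-sleeping context. */
static void demo_gc_add_range(struct demo_vm *vm, struct demo_range *range)
{
	spin_lock(&vm->gc_lock);
	if (list_empty(&range->gc_link))	/* avoid double-queueing */
		list_add_tail(&range->gc_link, &vm->gc_list);
	spin_unlock(&vm->gc_lock);

	queue_work(system_wq, &vm->gc_work);
}

/* Consumer side: drain the list one entry at a time. */
static void demo_gc_work_func(struct work_struct *w)
{
	struct demo_vm *vm = container_of(w, struct demo_vm, gc_work);
	struct demo_range *range;

	down_write(&vm->lock);
	for (;;) {
		spin_lock(&vm->gc_lock);
		range = list_first_entry_or_null(&vm->gc_list,
						 struct demo_range, gc_link);
		if (!range) {
			spin_unlock(&vm->gc_lock);
			break;
		}
		list_del_init(&range->gc_link);
		spin_unlock(&vm->gc_lock);

		kfree(range);	/* stand-in for the real range teardown */
	}
	up_write(&vm->lock);
}

static void demo_vm_init(struct demo_vm *vm)
{
	init_rwsem(&vm->lock);
	spin_lock_init(&vm->gc_lock);
	INIT_LIST_HEAD(&vm->gc_list);
	INIT_WORK(&vm->gc_work, demo_gc_work_func);
}

Keeping only the list manipulation under the spinlock is what lets the producer run from the MMU-notifier path without ever taking the sleeping VM lock; only the worker pays that cost.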
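
The second idiom is the device-private memory registration in xe_devm_add(): reserve a free physical address range, describe it with a MEMORY_DEVICE_PRIVATE struct dev_pagemap, and hand it to devm_memremap_pages() so every VRAM page gets a struct page the migration paths above can operate on. The sketch below shows that flow with hypothetical demo_* names and stubbed dev_pagemap_ops callbacks; it is an assumption-laden outline of the standard kernel API sequence, not the driver's code.

/*
 * Sketch of registering driver-managed memory as MEMORY_DEVICE_PRIVATE.
 * demo_* names are hypothetical; the callbacks are stubs.
 */
#include <linux/memremap.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/err.h>

struct demo_vram_region {
	struct dev_pagemap pagemap;
	resource_size_t usable_size;
	u64 hpa_base;	/* host physical base of the remapped range */
};

static vm_fault_t demo_migrate_to_ram(struct vm_fault *vmf)
{
	/* CPU touched a device-private page: migrate it back to system RAM. */
	return VM_FAULT_SIGBUS;	/* stub */
}

static void demo_page_free(struct page *page)
{
	/* Return the backing device page to the driver's allocator. */
}

static const struct dev_pagemap_ops demo_pagemap_ops = {
	.page_free = demo_page_free,
	.migrate_to_ram = demo_migrate_to_ram,
};

static int demo_devm_add(struct device *dev, struct demo_vram_region *vr)
{
	struct resource *res;
	void *addr;

	res = devm_request_free_mem_region(dev, &iomem_resource,
					   vr->usable_size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
	vr->pagemap.range.start = res->start;
	vr->pagemap.range.end = res->end;
	vr->pagemap.nr_range = 1;
	vr->pagemap.ops = &demo_pagemap_ops;
	vr->pagemap.owner = vr;	/* cookie shared by all pagemaps this driver owns */

	addr = devm_memremap_pages(dev, &vr->pagemap);
	if (IS_ERR(addr)) {
		devm_release_mem_region(dev, res->start, resource_size(res));
		return PTR_ERR(addr);
	}

	vr->hpa_base = res->start;
	return 0;
}

The reserved resource's start becomes the hpa_base that a helper like xe_vram_region_page_to_dpa() above relies on to translate a struct page's PFN back into a device physical address.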