Lines Matching defs:gpusvm
43 static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
45 return container_of(gpusvm, struct xe_vm, svm.gpusvm);
50 return gpusvm_to_vm(r->gpusvm);
55 "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
58 (r__)->base.gpusvm, \
71 xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
80 xe_vm_get(gpusvm_to_vm(gpusvm));
173 drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
197 static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
201 struct xe_vm *vm = gpusvm_to_vm(gpusvm);
212 vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
213 "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
214 vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
283 drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
745 err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
750 drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
752 err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)",
782 drm_gpusvm_fini(&vm->svm.gpusvm);
804 drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
852 return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
1112 "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
1113 vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1118 "Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
1119 vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
1218 return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
1235 drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
1242 drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
1287 r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
1311 err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
1314 drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
1340 lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
1343 drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
1428 range->base.gpusvm->mm,
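
The gpusvm_to_vm() helper at line 43 recovers the containing xe_vm from the embedded drm_gpusvm with container_of(). Below is a minimal, self-contained sketch of that back-pointer idiom; the struct names (gpusvm, vm) are hypothetical stand-ins for the real driver types, and the container_of macro is a simplified userspace version of the kernel's.

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of: walk back from a member pointer to its parent. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct gpusvm {			/* stand-in for struct drm_gpusvm */
	int dummy;
};

struct vm {			/* stand-in for struct xe_vm */
	int asid;
	struct {
		struct gpusvm gpusvm;
	} svm;
};

static struct vm *gpusvm_to_vm(struct gpusvm *gpusvm)
{
	/* Same pattern as line 45: member pointer -> enclosing object. */
	return container_of(gpusvm, struct vm, svm.gpusvm);
}

int main(void)
{
	struct vm vm = { .asid = 7 };
	struct gpusvm *inner = &vm.svm.gpusvm;

	/* Prints "asid=7": the embedded pointer leads back to its owner. */
	printf("asid=%d\n", gpusvm_to_vm(inner)->asid);
	return 0;
}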