Lines matching refs: svmm (source line numbers; the code appears to be the Linux kernel's nouveau SVM implementation, drivers/gpu/drm/nouveau/nouveau_svm.c)

67 			struct nouveau_svmm *svmm;
88 	struct nouveau_svmm *svmm;
169 	if (!cli->svm.svmm) {
186 		nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr,
206 nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
209 	if (svmm) {
210 		mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
211 		ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
216 		mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
222 nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
225 	if (svmm) {
228 		ivmm->svmm = svmm;
231 		mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
232 		list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
233 		mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
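
The part/join pair above maintains one mutex-protected list that maps a channel instance address (inst) to its svmm, which the fault handler later resolves through nouveau_ivmm_find(). A minimal userspace sketch of that pattern, using pthreads and a hand-rolled singly linked list; ivmm_join, ivmm_part and ivmm_find are illustrative stand-ins, not the driver's API:

/*
 * Sketch: mutex-protected inst -> svmm lookup list (cf. lines 206-233).
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ivmm {
	void *svmm;		/* owning SVM mm, opaque here */
	uint64_t inst;		/* channel instance address, the lookup key */
	struct ivmm *next;
};

static struct ivmm *inst_list;
static pthread_mutex_t inst_mutex = PTHREAD_MUTEX_INITIALIZER;

/* caller must hold inst_mutex, as the driver holds svm->mutex */
static struct ivmm *ivmm_find(uint64_t inst)
{
	struct ivmm *i;

	for (i = inst_list; i; i = i->next)
		if (i->inst == inst)
			return i;
	return NULL;
}

static int ivmm_join(void *svmm, uint64_t inst)
{
	struct ivmm *i = malloc(sizeof(*i));

	if (!i)
		return -1;
	i->svmm = svmm;
	i->inst = inst;
	pthread_mutex_lock(&inst_mutex);
	i->next = inst_list;
	inst_list = i;
	pthread_mutex_unlock(&inst_mutex);
	return 0;
}

static void ivmm_part(uint64_t inst)
{
	struct ivmm **p, *i;

	pthread_mutex_lock(&inst_mutex);
	for (p = &inst_list; (i = *p) && i->inst != inst; p = &i->next)
		;
	if (i) {
		*p = i->next;	/* unlink under the lock */
		free(i);
	}
	pthread_mutex_unlock(&inst_mutex);
}

int main(void)
{
	int token;

	ivmm_join(&token, 0x1000);
	pthread_mutex_lock(&inst_mutex);
	printf("inst 0x1000 -> %p\n", (void *)ivmm_find(0x1000));
	pthread_mutex_unlock(&inst_mutex);
	ivmm_part(0x1000);
	return 0;
}
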
240 nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
243 		nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
255 	struct nouveau_svmm *svmm =
263 	SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);
265 	mutex_lock(&svmm->mutex);
266 	if (unlikely(!svmm->vmm))
274 	    update->owner == svmm->vmm->cli->drm->dev)
277 	if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
278 		if (start < svmm->unmanaged.start) {
279 			nouveau_svmm_invalidate(svmm, start,
280 						svmm->unmanaged.limit);
282 		start = svmm->unmanaged.limit;
285 	nouveau_svmm_invalidate(svmm, start, limit);
288 	mutex_unlock(&svmm->mutex);
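
Lines 277-285 carve the unmanaged window out of the range handed to the notifier callback: if the range overlaps the window, the low part is flushed through the window's limit and the walk resumes past it. A self-contained model of just that arithmetic; invalidate_split() and invalidate() are hypothetical stand-ins for the driver's functions:

/*
 * Sketch: the unmanaged-window split of lines 277-285.
 */
#include <stdint.h>
#include <stdio.h>

static void invalidate(uint64_t start, uint64_t limit)
{
	if (limit > start)	/* nouveau_svmm_invalidate() guards likewise */
		printf("invalidate %016llx-%016llx\n",
		       (unsigned long long)start, (unsigned long long)limit);
}

static void invalidate_split(uint64_t start, uint64_t limit,
			     uint64_t um_start, uint64_t um_limit)
{
	if (limit > um_start && start < um_limit) {
		if (start < um_start)
			invalidate(start, um_limit);
		start = um_limit;	/* skip past the unmanaged window */
	}
	invalidate(start, limit);
}

int main(void)
{
	/* a range straddling an unmanaged window at [0x4000, 0x8000) */
	invalidate_split(0x1000, 0x10000, 0x4000, 0x8000);
	return 0;
}
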
305 	struct nouveau_svmm *svmm = *psvmm;
306 	if (svmm) {
307 		mutex_lock(&svmm->mutex);
308 		svmm->vmm = NULL;
309 		mutex_unlock(&svmm->mutex);
310 		mmu_notifier_put(&svmm->notifier);
320 	struct nouveau_svmm *svmm;
329 	if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
331 	svmm->vmm = &cli->svm;
332 	svmm->unmanaged.start = args->unmanaged_addr;
333 	svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size;
334 	mutex_init(&svmm->mutex);
359 	svmm->notifier.ops = &nouveau_mn_ops;
360 	ret = __mmu_notifier_register(&svmm->notifier, current->mm);
363 	/* Note, ownership of svmm transfers to mmu_notifier */
365 	cli->svm.svmm = svmm;
375 	kfree(svmm);
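
Lines 329-375 show the svmm lifetime rule spelled out by the comment at line 363: once __mmu_notifier_register() succeeds, the notifier owns the allocation, so nouveau_svmm_fini() (lines 305-310) only detaches the vmm pointer under the mutex before dropping that reference with mmu_notifier_put(); the kfree() at line 375 runs only on the failure path. A userspace model of the same ownership hand-off, with illustrative names (svmm_init, svmm_fini, register_notifier, put_notifier):

/*
 * Sketch: ownership hand-off between init (lines 329-365) and
 * fini (lines 305-310).
 */
#include <stdio.h>
#include <stdlib.h>

struct svmm {
	void *vmm;	/* cleared on fini; fault paths check it under a lock */
	int registered;
};

static int register_notifier(struct svmm *svmm, int fail)
{
	if (fail)
		return -1;	/* caller still owns svmm */
	svmm->registered = 1;	/* ownership transfers to the notifier */
	return 0;
}

static void put_notifier(struct svmm *svmm)
{
	free(svmm);	/* last reference: the notifier layer frees it */
}

static struct svmm *svmm_init(int fail)
{
	struct svmm *svmm = calloc(1, sizeof(*svmm));

	if (!svmm)
		return NULL;
	svmm->vmm = &svmm->registered;	/* any non-NULL token */
	if (register_notifier(svmm, fail)) {
		free(svmm);	/* failure path, like the kfree() at line 375 */
		return NULL;
	}
	return svmm;
}

static void svmm_fini(struct svmm **psvmm)
{
	struct svmm *svmm = *psvmm;

	if (svmm) {
		svmm->vmm = NULL;	/* detach first (under svmm->mutex) */
		put_notifier(svmm);	/* then drop the notifier reference */
		*psvmm = NULL;
	}
}

int main(void)
{
	struct svmm *svmm = svmm_init(0);

	printf("init ok: %d\n", svmm != NULL);
	svmm_fini(&svmm);
	printf("fini ok: %d\n", svmm == NULL);
	return 0;
}
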
503 	struct nouveau_svmm *svmm;
514 	    range->owner == sn->svmm->vmm->cli->drm->dev)
525 		mutex_lock(&sn->svmm->mutex);
526 	else if (!mutex_trylock(&sn->svmm->mutex))
529 	mutex_unlock(&sn->svmm->mutex);
585 static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
592 	struct mm_struct *mm = svmm->notifier.mm;
621 		mutex_lock(&svmm->mutex);
625 		mutex_unlock(&svmm->mutex);
641 	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
642 	mutex_unlock(&svmm->mutex);
652 static int nouveau_range_fault(struct nouveau_svmm *svmm,
668 	struct mm_struct *mm = svmm->notifier.mm;
696 		mutex_lock(&svmm->mutex);
699 			mutex_unlock(&svmm->mutex);
707 	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
708 	mutex_unlock(&svmm->mutex);
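
Both fault paths above follow the same mmu_interval_notifier retry idiom: sample a sequence count, do the slow page gathering outside svmm->mutex, then re-check the count under the mutex and either retry or commit the GPU mapping while still holding it (the unlock at lines 642 and 708 comes only after the nvif ioctl). A compact userspace model of the idiom; the atomic counter stands in for mmu_interval_read_begin()/mmu_interval_read_retry() and all names are illustrative:

/*
 * Sketch: the sample/fault/recheck retry loop shared by both
 * range-fault paths.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_long notifier_seq;
static pthread_mutex_t svmm_mutex = PTHREAD_MUTEX_INITIALIZER;

static long read_begin(void)
{
	return atomic_load(&notifier_seq);
}

static int read_retry(long seq)
{
	return atomic_load(&notifier_seq) != seq;
}

static int range_fault(void)
{
	for (;;) {
		long seq = read_begin();

		/* slow work (page gathering, hmm_range_fault() in the
		 * driver) runs here, outside the lock, so invalidations
		 * are never blocked */

		pthread_mutex_lock(&svmm_mutex);
		if (read_retry(seq)) {
			pthread_mutex_unlock(&svmm_mutex);
			continue;	/* raced with an invalidation */
		}
		break;			/* still valid: keep holding the lock */
	}

	/* commit: the ioctl that programs the GPU page tables runs under
	 * the mutex, then the lock drops (cf. lines 641-642, 707-708) */
	pthread_mutex_unlock(&svmm_mutex);
	return 0;
}

int main(void)
{
	printf("fault: %d\n", range_fault());
	return 0;
}
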
722 	struct nouveau_svmm *svmm;
759 	for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
760 		if (!svmm || buffer->fault[fi]->inst != inst) {
763 			svmm = ivmm ? ivmm->svmm : NULL;
765 			SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
767 		buffer->fault[fi]->svmm = svmm;
783 		if (!(svmm = buffer->fault[fi]->svmm)) {
787 		SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);
794 		if (start < svmm->unmanaged.limit)
795 			limit = min_t(u64, limit, svmm->unmanaged.start);
824 		mm = svmm->notifier.mm;
830 		notifier.svmm = svmm;
832 			ret = nouveau_atomic_range_fault(svmm, svm->drm, args,
836 			ret = nouveau_range_fault(svmm, svm->drm, args,
852 			if (buffer->fault[fn]->svmm != svmm ||
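
The loop at lines 759-767 resolves each fault's instance to an svmm, repeating the lookup only when the instance changes (the fault buffer is sorted so equal instances are adjacent) and caching the result in each fault entry, so later passes such as the grouping test at line 852 can compare pointers directly. A small standalone model of that caching dispatch; lookup_svmm() is a hypothetical stand-in for the nouveau_ivmm_find() translation:

/*
 * Sketch: the inst -> svmm caching dispatch of lines 759-767.
 */
#include <stdint.h>
#include <stdio.h>

struct fault {
	uint64_t inst;
	void *svmm;
};

static void *lookup_svmm(uint64_t inst)
{
	static int ctx_a, ctx_b;	/* two fake SVM contexts */

	return inst == 0x1000 ? (void *)&ctx_a :
	       inst == 0x2000 ? (void *)&ctx_b : NULL;
}

static void dispatch(struct fault *faults, int nr)
{
	void *svmm = NULL;
	uint64_t inst = 0;
	int fi;

	for (fi = 0; fi < nr; fi++) {
		if (!svmm || faults[fi].inst != inst) {
			inst = faults[fi].inst;
			svmm = lookup_svmm(inst);	/* once per run */
			printf("inst %016llx -> svmm %p\n",
			       (unsigned long long)inst, svmm);
		}
		faults[fi].svmm = svmm;	/* cached for later passes */
	}
}

int main(void)
{
	struct fault faults[] = {
		{ 0x1000, 0 }, { 0x1000, 0 }, { 0x2000, 0 },
	};

	dispatch(faults, 3);
	return 0;
}
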
923 nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
931 	mutex_lock(&svmm->mutex);
933 	nvif_object_ioctl(&svmm->vmm->vmm.object, args,
936 	mutex_unlock(&svmm->mutex);