Lines Matching +full:reg +full:- +full:addr

1 // SPDX-License-Identifier: MIT
68 u64 addr; member
78 struct nouveau_uvma_region *reg; member
85 u64 addr; member
92 u64 addr, u64 range) in nouveau_uvmm_vmm_sparse_ref() argument
94 struct nvif_vmm *vmm = &uvmm->vmm.vmm; in nouveau_uvmm_vmm_sparse_ref()
96 return nvif_vmm_raw_sparse(vmm, addr, range, true); in nouveau_uvmm_vmm_sparse_ref()
101 u64 addr, u64 range) in nouveau_uvmm_vmm_sparse_unref() argument
103 struct nvif_vmm *vmm = &uvmm->vmm.vmm; in nouveau_uvmm_vmm_sparse_unref()
105 return nvif_vmm_raw_sparse(vmm, addr, range, false); in nouveau_uvmm_vmm_sparse_unref()
110 u64 addr, u64 range) in nouveau_uvmm_vmm_get() argument
112 struct nvif_vmm *vmm = &uvmm->vmm.vmm; in nouveau_uvmm_vmm_get()
114 return nvif_vmm_raw_get(vmm, addr, range, PAGE_SHIFT); in nouveau_uvmm_vmm_get()
119 u64 addr, u64 range) in nouveau_uvmm_vmm_put() argument
121 struct nvif_vmm *vmm = &uvmm->vmm.vmm; in nouveau_uvmm_vmm_put()
123 return nvif_vmm_raw_put(vmm, addr, range, PAGE_SHIFT); in nouveau_uvmm_vmm_put()
128 u64 addr, u64 range, bool sparse) in nouveau_uvmm_vmm_unmap() argument
130 struct nvif_vmm *vmm = &uvmm->vmm.vmm; in nouveau_uvmm_vmm_unmap()
132 return nvif_vmm_raw_unmap(vmm, addr, range, PAGE_SHIFT, sparse); in nouveau_uvmm_vmm_unmap()
137 u64 addr, u64 range, in nouveau_uvmm_vmm_map() argument
141 struct nvif_vmm *vmm = &uvmm->vmm.vmm; in nouveau_uvmm_vmm_map()
147 switch (vmm->object.oclass) { in nouveau_uvmm_vmm_map()
152 if (mem->mem.type & NVIF_MEM_VRAM) in nouveau_uvmm_vmm_map()
163 return -ENOSYS; in nouveau_uvmm_vmm_map()
166 return nvif_vmm_raw_map(vmm, addr, range, PAGE_SHIFT, in nouveau_uvmm_vmm_map()
168 &mem->mem, bo_offset); in nouveau_uvmm_vmm_map()
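
The six wrappers above (from nouveau's uvmm code, nouveau_uvmm.c) are thin shims over the nvif raw-VMM interface: the sparse_ref/sparse_unref pair toggles a sparse mapping across a range, get/put reserve and release page-table backing at PAGE_SHIFT granularity, and map/unmap program and tear down the actual mappings. A minimal sketch of how a caller pairs them, reusing the wrapper signatures shown above but with a hypothetical caller name and the driver's own types assumed:

/*
 * Illustrative pairing only: reserve page-table backing with _get() before
 * _map() programs the PTEs, and tear down in the reverse order.
 */
static int example_map_then_unmap(struct nouveau_uvmm *uvmm,
				  struct nouveau_mem *mem,
				  u64 addr, u64 range, u64 bo_offset, u8 kind)
{
	int ret;

	ret = nouveau_uvmm_vmm_get(uvmm, addr, range);	/* reserve PT backing */
	if (ret)
		return ret;

	ret = nouveau_uvmm_vmm_map(uvmm, addr, range, bo_offset, kind, mem);
	if (ret) {
		nouveau_uvmm_vmm_put(uvmm, addr, range);
		return ret;
	}

	/* ... later, teardown mirrors the setup ... */
	nouveau_uvmm_vmm_unmap(uvmm, addr, range, false /* not sparse */);
	nouveau_uvmm_vmm_put(uvmm, addr, range);
	return 0;
}
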
172 nouveau_uvma_region_sparse_unref(struct nouveau_uvma_region *reg) in nouveau_uvma_region_sparse_unref() argument
174 u64 addr = reg->va.addr; in nouveau_uvma_region_sparse_unref() local
175 u64 range = reg->va.range; in nouveau_uvma_region_sparse_unref()
177 return nouveau_uvmm_vmm_sparse_unref(reg->uvmm, addr, range); in nouveau_uvma_region_sparse_unref()
183 u64 addr = uvma->va.va.addr; in nouveau_uvma_vmm_put() local
184 u64 range = uvma->va.va.range; in nouveau_uvma_vmm_put()
186 return nouveau_uvmm_vmm_put(to_uvmm(uvma), addr, range); in nouveau_uvma_vmm_put()
193 u64 addr = uvma->va.va.addr; in nouveau_uvma_map() local
194 u64 offset = uvma->va.gem.offset; in nouveau_uvma_map()
195 u64 range = uvma->va.va.range; in nouveau_uvma_map()
197 return nouveau_uvmm_vmm_map(to_uvmm(uvma), addr, range, in nouveau_uvma_map()
198 offset, uvma->kind, mem); in nouveau_uvma_map()
204 u64 addr = uvma->va.va.addr; in nouveau_uvma_unmap() local
205 u64 range = uvma->va.va.range; in nouveau_uvma_unmap()
206 bool sparse = !!uvma->region; in nouveau_uvma_unmap()
208 if (drm_gpuva_invalidated(&uvma->va)) in nouveau_uvma_unmap()
211 return nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse); in nouveau_uvma_unmap()
219 return -ENOMEM; in nouveau_uvma_alloc()
233 drm_gem_object_get(uvma->va.gem.obj); in nouveau_uvma_gem_get()
239 drm_gem_object_put(uvma->va.gem.obj); in nouveau_uvma_gem_put()
247 return -ENOMEM; in nouveau_uvma_region_alloc()
249 kref_init(&(*preg)->kref); in nouveau_uvma_region_alloc()
257 struct nouveau_uvma_region *reg = in nouveau_uvma_region_free() local
260 kfree(reg); in nouveau_uvma_region_free()
264 nouveau_uvma_region_get(struct nouveau_uvma_region *reg) in nouveau_uvma_region_get() argument
266 kref_get(&reg->kref); in nouveau_uvma_region_get()
270 nouveau_uvma_region_put(struct nouveau_uvma_region *reg) in nouveau_uvma_region_put() argument
272 kref_put(&reg->kref, nouveau_uvma_region_free); in nouveau_uvma_region_put()
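
Region lifetime above is plain kref reference counting: kref_init() in the allocator, get/put wrappers, and a release callback that recovers the containing object with container_of() and frees it. A self-contained sketch of the same pattern with hypothetical names:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_region {
	struct kref kref;
	u64 addr, range;
};

static struct demo_region *demo_region_alloc(void)
{
	struct demo_region *reg = kzalloc(sizeof(*reg), GFP_KERNEL);

	if (reg)
		kref_init(&reg->kref);	/* refcount starts at 1 */
	return reg;
}

static void demo_region_release(struct kref *kref)
{
	/* called by kref_put() when the last reference is dropped */
	kfree(container_of(kref, struct demo_region, kref));
}

static void demo_region_get(struct demo_region *reg)
{
	kref_get(&reg->kref);
}

static void demo_region_put(struct demo_region *reg)
{
	kref_put(&reg->kref, demo_region_release);
}
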
277 struct nouveau_uvma_region *reg) in __nouveau_uvma_region_insert() argument
279 u64 addr = reg->va.addr; in __nouveau_uvma_region_insert() local
280 u64 range = reg->va.range; in __nouveau_uvma_region_insert()
281 u64 last = addr + range - 1; in __nouveau_uvma_region_insert()
282 MA_STATE(mas, &uvmm->region_mt, addr, addr); in __nouveau_uvma_region_insert()
285 return -EEXIST; in __nouveau_uvma_region_insert()
288 return -EEXIST; in __nouveau_uvma_region_insert()
290 mas.index = addr; in __nouveau_uvma_region_insert()
293 mas_store_gfp(&mas, reg, GFP_KERNEL); in __nouveau_uvma_region_insert()
295 reg->uvmm = uvmm; in __nouveau_uvma_region_insert()
302 struct nouveau_uvma_region *reg, in nouveau_uvma_region_insert() argument
303 u64 addr, u64 range) in nouveau_uvma_region_insert() argument
307 reg->uvmm = uvmm; in nouveau_uvma_region_insert()
308 reg->va.addr = addr; in nouveau_uvma_region_insert()
309 reg->va.range = range; in nouveau_uvma_region_insert()
311 ret = __nouveau_uvma_region_insert(uvmm, reg); in nouveau_uvma_region_insert()
319 nouveau_uvma_region_remove(struct nouveau_uvma_region *reg) in nouveau_uvma_region_remove() argument
321 struct nouveau_uvmm *uvmm = reg->uvmm; in nouveau_uvma_region_remove()
322 MA_STATE(mas, &uvmm->region_mt, reg->va.addr, 0); in nouveau_uvma_region_remove()
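
Regions are indexed in a maple tree keyed by VA, with locking delegated to the uvmm mutex (the MT_FLAGS_LOCK_EXTERN / mt_set_external_lock() setup is visible further down in the init path). The insert path walks the tree at the start address, bails out with -EEXIST if anything already occupies the span, then stores the region over [addr, addr + range - 1]; removal presumably reduces to a mas_erase() at the region's start address. A compact sketch of that store/erase pattern, assuming the external lock is already held and using hypothetical names:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/maple_tree.h>
#include <linux/types.h>

/* caller holds the mutex registered via mt_set_external_lock() */
static int demo_region_store(struct maple_tree *mt, void *reg,
			     u64 addr, u64 range)
{
	u64 last = addr + range - 1;
	MA_STATE(mas, mt, addr, addr);

	if (unlikely(mas_walk(&mas)))
		return -EEXIST;		/* something already maps addr */

	if (unlikely(mas.last < last))
		return -EEXIST;		/* the empty gap ends before our range does */

	mas_set_range(&mas, addr, last);
	return mas_store_gfp(&mas, reg, GFP_KERNEL);
}

static void demo_region_erase(struct maple_tree *mt, u64 addr)
{
	MA_STATE(mas, mt, addr, 0);

	mas_erase(&mas);	/* drops whatever entry spans addr */
}
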
329 u64 addr, u64 range) in nouveau_uvma_region_create() argument
331 struct nouveau_uvma_region *reg; in nouveau_uvma_region_create() local
334 if (!drm_gpuvm_interval_empty(&uvmm->base, addr, range)) in nouveau_uvma_region_create()
335 return -ENOSPC; in nouveau_uvma_region_create()
337 ret = nouveau_uvma_region_alloc(&reg); in nouveau_uvma_region_create()
341 ret = nouveau_uvma_region_insert(uvmm, reg, addr, range); in nouveau_uvma_region_create()
345 ret = nouveau_uvmm_vmm_sparse_ref(uvmm, addr, range); in nouveau_uvma_region_create()
352 nouveau_uvma_region_remove(reg); in nouveau_uvma_region_create()
354 nouveau_uvma_region_put(reg); in nouveau_uvma_region_create()
360 u64 addr, u64 range) in nouveau_uvma_region_find_first() argument
362 MA_STATE(mas, &uvmm->region_mt, addr, 0); in nouveau_uvma_region_find_first()
364 return mas_find(&mas, addr + range - 1); in nouveau_uvma_region_find_first()
369 u64 addr, u64 range) in nouveau_uvma_region_find() argument
371 struct nouveau_uvma_region *reg; in nouveau_uvma_region_find() local
373 reg = nouveau_uvma_region_find_first(uvmm, addr, range); in nouveau_uvma_region_find()
374 if (!reg) in nouveau_uvma_region_find()
377 if (reg->va.addr != addr || in nouveau_uvma_region_find()
378 reg->va.range != range) in nouveau_uvma_region_find()
381 return reg; in nouveau_uvma_region_find()
385 nouveau_uvma_region_empty(struct nouveau_uvma_region *reg) in nouveau_uvma_region_empty() argument
387 struct nouveau_uvmm *uvmm = reg->uvmm; in nouveau_uvma_region_empty()
389 return drm_gpuvm_interval_empty(&uvmm->base, in nouveau_uvma_region_empty()
390 reg->va.addr, in nouveau_uvma_region_empty()
391 reg->va.range); in nouveau_uvma_region_empty()
395 __nouveau_uvma_region_destroy(struct nouveau_uvma_region *reg) in __nouveau_uvma_region_destroy() argument
397 struct nouveau_uvmm *uvmm = reg->uvmm; in __nouveau_uvma_region_destroy()
398 u64 addr = reg->va.addr; in __nouveau_uvma_region_destroy() local
399 u64 range = reg->va.range; in __nouveau_uvma_region_destroy()
401 if (!nouveau_uvma_region_empty(reg)) in __nouveau_uvma_region_destroy()
402 return -EBUSY; in __nouveau_uvma_region_destroy()
404 nouveau_uvma_region_remove(reg); in __nouveau_uvma_region_destroy()
405 nouveau_uvmm_vmm_sparse_unref(uvmm, addr, range); in __nouveau_uvma_region_destroy()
406 nouveau_uvma_region_put(reg); in __nouveau_uvma_region_destroy()
413 u64 addr, u64 range) in nouveau_uvma_region_destroy() argument
415 struct nouveau_uvma_region *reg; in nouveau_uvma_region_destroy() local
417 reg = nouveau_uvma_region_find(uvmm, addr, range); in nouveau_uvma_region_destroy()
418 if (!reg) in nouveau_uvma_region_destroy()
419 return -ENOENT; in nouveau_uvma_region_destroy()
421 return __nouveau_uvma_region_destroy(reg); in nouveau_uvma_region_destroy()
425 nouveau_uvma_region_dirty(struct nouveau_uvma_region *reg) in nouveau_uvma_region_dirty() argument
428 init_completion(&reg->complete); in nouveau_uvma_region_dirty()
429 reg->dirty = true; in nouveau_uvma_region_dirty()
433 nouveau_uvma_region_complete(struct nouveau_uvma_region *reg) in nouveau_uvma_region_complete() argument
435 complete_all(&reg->complete); in nouveau_uvma_region_complete()
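
region_dirty()/region_complete() above implement a simple handoff: a region about to be torn down is marked dirty and gets a fresh completion, and a later bind job that finds the region dirty (see bind_validate_map_common() below) blocks in wait_for_completion() until the teardown path calls complete_all(). Reduced to the completion API alone (the real code also holds a reference and the uvmm mutex around the wait), the synchronization looks like this:

#include <linux/completion.h>
#include <linux/types.h>

struct demo_dirty_region {
	struct completion complete;
	bool dirty;
};

/* teardown side: mark the region as going away */
static void demo_region_dirty(struct demo_dirty_region *reg)
{
	init_completion(&reg->complete);
	reg->dirty = true;
}

/* teardown side: wake every waiter once the region is gone */
static void demo_region_complete(struct demo_dirty_region *reg)
{
	complete_all(&reg->complete);
}

/* validation side: wait until a dirty region has been fully torn down */
static void demo_region_wait(struct demo_dirty_region *reg)
{
	if (reg->dirty)
		wait_for_completion(&reg->complete);
}
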
441 struct drm_gpuva *va = &uvma->va; in op_map_prepare_unwind()
450 drm_gpuva_insert(va->vm, va); in op_unmap_prepare_unwind()
461 u64 vmm_get_start = args ? args->addr : 0; in nouveau_uvmm_sm_prepare_unwind()
462 u64 vmm_get_end = args ? args->addr + args->range : 0; in nouveau_uvmm_sm_prepare_unwind()
466 switch (op->op) { in nouveau_uvmm_sm_prepare_unwind()
468 op_map_prepare_unwind(new->map); in nouveau_uvmm_sm_prepare_unwind()
471 struct drm_gpuva_op_remap *r = &op->remap; in nouveau_uvmm_sm_prepare_unwind()
472 struct drm_gpuva *va = r->unmap->va; in nouveau_uvmm_sm_prepare_unwind()
474 if (r->next) in nouveau_uvmm_sm_prepare_unwind()
475 op_map_prepare_unwind(new->next); in nouveau_uvmm_sm_prepare_unwind()
477 if (r->prev) in nouveau_uvmm_sm_prepare_unwind()
478 op_map_prepare_unwind(new->prev); in nouveau_uvmm_sm_prepare_unwind()
484 op_unmap_prepare_unwind(op->unmap.va); in nouveau_uvmm_sm_prepare_unwind()
498 switch (op->op) { in nouveau_uvmm_sm_prepare_unwind()
500 u64 vmm_get_range = vmm_get_end - vmm_get_start; in nouveau_uvmm_sm_prepare_unwind()
508 struct drm_gpuva_op_remap *r = &op->remap; in nouveau_uvmm_sm_prepare_unwind()
509 struct drm_gpuva *va = r->unmap->va; in nouveau_uvmm_sm_prepare_unwind()
510 u64 ustart = va->va.addr; in nouveau_uvmm_sm_prepare_unwind()
511 u64 urange = va->va.range; in nouveau_uvmm_sm_prepare_unwind()
514 if (r->prev) in nouveau_uvmm_sm_prepare_unwind()
517 if (r->next) in nouveau_uvmm_sm_prepare_unwind()
520 if (r->prev && r->next) in nouveau_uvmm_sm_prepare_unwind()
526 struct drm_gpuva_op_unmap *u = &op->unmap; in nouveau_uvmm_sm_prepare_unwind()
527 struct drm_gpuva *va = u->va; in nouveau_uvmm_sm_prepare_unwind()
528 u64 ustart = va->va.addr; in nouveau_uvmm_sm_prepare_unwind()
529 u64 urange = va->va.range; in nouveau_uvmm_sm_prepare_unwind()
538 u64 vmm_get_range = ustart - vmm_get_start; in nouveau_uvmm_sm_prepare_unwind()
559 u64 addr, u64 range) in nouveau_uvmm_sm_map_prepare_unwind() argument
563 .addr = addr, in nouveau_uvmm_sm_map_prepare_unwind()
593 uvma->region = args->region; in op_map_prepare()
594 uvma->kind = args->kind; in op_map_prepare()
596 drm_gpuva_map(&uvmm->base, &uvma->va, op); in op_map_prepare()
621 u64 vmm_get_start = args ? args->addr : 0; in nouveau_uvmm_sm_prepare()
622 u64 vmm_get_end = args ? args->addr + args->range : 0; in nouveau_uvmm_sm_prepare()
626 switch (op->op) { in nouveau_uvmm_sm_prepare()
628 u64 vmm_get_range = vmm_get_end - vmm_get_start; in nouveau_uvmm_sm_prepare()
630 ret = op_map_prepare(uvmm, &new->map, &op->map, args); in nouveau_uvmm_sm_prepare()
638 op_map_prepare_unwind(new->map); in nouveau_uvmm_sm_prepare()
646 struct drm_gpuva_op_remap *r = &op->remap; in nouveau_uvmm_sm_prepare()
647 struct drm_gpuva *va = r->unmap->va; in nouveau_uvmm_sm_prepare()
649 .kind = uvma_from_va(va)->kind, in nouveau_uvmm_sm_prepare()
650 .region = uvma_from_va(va)->region, in nouveau_uvmm_sm_prepare()
652 u64 ustart = va->va.addr; in nouveau_uvmm_sm_prepare()
653 u64 urange = va->va.range; in nouveau_uvmm_sm_prepare()
656 op_unmap_prepare(r->unmap); in nouveau_uvmm_sm_prepare()
658 if (r->prev) { in nouveau_uvmm_sm_prepare()
659 ret = op_map_prepare(uvmm, &new->prev, r->prev, in nouveau_uvmm_sm_prepare()
668 if (r->next) { in nouveau_uvmm_sm_prepare()
669 ret = op_map_prepare(uvmm, &new->next, r->next, in nouveau_uvmm_sm_prepare()
672 if (r->prev) in nouveau_uvmm_sm_prepare()
673 op_map_prepare_unwind(new->prev); in nouveau_uvmm_sm_prepare()
681 if (args && (r->prev && r->next)) in nouveau_uvmm_sm_prepare()
687 struct drm_gpuva_op_unmap *u = &op->unmap; in nouveau_uvmm_sm_prepare()
688 struct drm_gpuva *va = u->va; in nouveau_uvmm_sm_prepare()
689 u64 ustart = va->va.addr; in nouveau_uvmm_sm_prepare()
690 u64 urange = va->va.range; in nouveau_uvmm_sm_prepare()
704 u64 vmm_get_range = ustart - vmm_get_start; in nouveau_uvmm_sm_prepare()
718 ret = -EINVAL; in nouveau_uvmm_sm_prepare()
738 u64 addr, u64 range, u8 kind) in nouveau_uvmm_sm_map_prepare() argument
742 .addr = addr, in nouveau_uvmm_sm_map_prepare()
761 switch (op->op) { in op_gem_obj()
763 return op->map.gem.obj; in op_gem_obj()
769 return op->remap.unmap->va->gem.obj; in op_gem_obj()
771 return op->unmap.va->gem.obj; in op_gem_obj()
781 struct nouveau_bo *nvbo = nouveau_gem_object(uvma->va.gem.obj); in op_map()
783 nouveau_uvma_map(uvma, nouveau_mem(nvbo->bo.resource)); in op_map()
789 struct drm_gpuva *va = u->va; in op_unmap()
793 if (!u->keep) in op_unmap()
799 u64 addr, u64 range) in op_unmap_range() argument
801 struct nouveau_uvma *uvma = uvma_from_va(u->va); in op_unmap_range()
802 bool sparse = !!uvma->region; in op_unmap_range()
804 if (!drm_gpuva_invalidated(u->va)) in op_unmap_range()
805 nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse); in op_unmap_range()
812 struct drm_gpuva_op_unmap *u = r->unmap; in op_remap()
813 struct nouveau_uvma *uvma = uvma_from_va(u->va); in op_remap()
814 u64 addr = uvma->va.va.addr; in op_remap() local
815 u64 end = uvma->va.va.addr + uvma->va.va.range; in op_remap()
817 if (r->prev) in op_remap()
818 addr = r->prev->va.addr + r->prev->va.range; in op_remap()
820 if (r->next) in op_remap()
821 end = r->next->va.addr; in op_remap()
823 op_unmap_range(u, addr, end - addr); in op_remap()
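
op_remap() only unmaps the hole between the surviving pieces of the old mapping: it starts from the old VA span and pulls the start up past a kept prev piece and the end down to the start of a kept next piece. The same arithmetic in plain, compilable C with a worked (hypothetical) example:

#include <assert.h>
#include <stdint.h>

/* start of the hole: just past the prev remainder, if one is kept */
static uint64_t remap_hole_start(uint64_t old_addr, int have_prev,
				 uint64_t prev_addr, uint64_t prev_range)
{
	return have_prev ? prev_addr + prev_range : old_addr;
}

/* end of the hole: the start of the next remainder, if one is kept */
static uint64_t remap_hole_end(uint64_t old_end, int have_next,
			       uint64_t next_addr)
{
	return have_next ? next_addr : old_end;
}

int main(void)
{
	/* old mapping [0x1000, 0x5000); the new request covers [0x2000, 0x4000) */
	uint64_t start = remap_hole_start(0x1000, 1, 0x1000, 0x1000); /* prev keeps [0x1000, 0x2000) */
	uint64_t end   = remap_hole_end(0x5000, 1, 0x4000);           /* next keeps [0x4000, 0x5000) */

	assert(start == 0x2000 && end == 0x4000); /* only the hole is unmapped */
	return 0;
}
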
834 switch (op->op) { in nouveau_uvmm_sm()
836 op_map(new->map); in nouveau_uvmm_sm()
839 op_remap(&op->remap, new); in nouveau_uvmm_sm()
842 op_unmap(&op->unmap); in nouveau_uvmm_sm()
876 switch (op->op) { in nouveau_uvmm_sm_cleanup()
880 struct drm_gpuva_op_remap *r = &op->remap; in nouveau_uvmm_sm_cleanup()
881 struct drm_gpuva_op_map *p = r->prev; in nouveau_uvmm_sm_cleanup()
882 struct drm_gpuva_op_map *n = r->next; in nouveau_uvmm_sm_cleanup()
883 struct drm_gpuva *va = r->unmap->va; in nouveau_uvmm_sm_cleanup()
887 u64 addr = va->va.addr; in nouveau_uvmm_sm_cleanup() local
888 u64 end = addr + va->va.range; in nouveau_uvmm_sm_cleanup()
891 addr = p->va.addr + p->va.range; in nouveau_uvmm_sm_cleanup()
894 end = n->va.addr; in nouveau_uvmm_sm_cleanup()
896 nouveau_uvmm_vmm_put(uvmm, addr, end - addr); in nouveau_uvmm_sm_cleanup()
904 struct drm_gpuva_op_unmap *u = &op->unmap; in nouveau_uvmm_sm_cleanup()
905 struct drm_gpuva *va = u->va; in nouveau_uvmm_sm_cleanup()
938 nouveau_uvmm_validate_range(struct nouveau_uvmm *uvmm, u64 addr, u64 range) in nouveau_uvmm_validate_range() argument
940 if (addr & ~PAGE_MASK) in nouveau_uvmm_validate_range()
941 return -EINVAL; in nouveau_uvmm_validate_range()
944 return -EINVAL; in nouveau_uvmm_validate_range()
946 if (!drm_gpuvm_range_valid(&uvmm->base, addr, range)) in nouveau_uvmm_validate_range()
947 return -EINVAL; in nouveau_uvmm_validate_range()
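
nouveau_uvmm_validate_range() rejects unaligned requests before asking drm_gpuvm whether the span is valid for the VA space; only the first of its checks (addr & ~PAGE_MASK) is visible in this listing. A user-space approximation of the alignment/sanity part, assuming 4 KiB pages and treating the range-alignment and wrap checks as assumptions rather than the driver's exact code:

#include <errno.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE 4096ULL
#define DEMO_PAGE_MASK (~(DEMO_PAGE_SIZE - 1))

static int demo_validate_range(uint64_t addr, uint64_t range)
{
	if (addr & ~DEMO_PAGE_MASK)		/* start must sit on a page boundary */
		return -EINVAL;

	if (range & ~DEMO_PAGE_MASK)		/* assumed: length is whole pages */
		return -EINVAL;

	if (!range || addr + range < addr)	/* assumed: non-empty, non-wrapping span */
		return -EINVAL;

	return 0;
}
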
957 return -ENOMEM; in nouveau_uvmm_bind_job_alloc()
959 kref_init(&(*pjob)->kref); in nouveau_uvmm_bind_job_alloc()
971 list_for_each_op_safe(op, next, &job->ops) { in nouveau_uvmm_bind_job_free()
972 list_del(&op->entry); in nouveau_uvmm_bind_job_free()
976 nouveau_job_free(&job->base); in nouveau_uvmm_bind_job_free()
983 kref_get(&job->kref); in nouveau_uvmm_bind_job_get()
989 kref_put(&job->kref, nouveau_uvmm_bind_job_free); in nouveau_uvmm_bind_job_put()
996 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli); in bind_validate_op()
997 struct drm_gem_object *obj = op->gem.obj; in bind_validate_op()
999 if (op->op == OP_MAP) { in bind_validate_op()
1000 if (op->gem.offset & ~PAGE_MASK) in bind_validate_op()
1001 return -EINVAL; in bind_validate_op()
1003 if (obj->size <= op->gem.offset) in bind_validate_op()
1004 return -EINVAL; in bind_validate_op()
1006 if (op->va.range > (obj->size - op->gem.offset)) in bind_validate_op()
1007 return -EINVAL; in bind_validate_op()
1010 return nouveau_uvmm_validate_range(uvmm, op->va.addr, op->va.range); in bind_validate_op()
1014 bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range) in bind_validate_map_sparse() argument
1016 struct nouveau_sched *sched = job->sched; in bind_validate_map_sparse()
1019 u64 end = addr + range; in bind_validate_map_sparse()
1022 spin_lock(&sched->job.list.lock); in bind_validate_map_sparse()
1023 list_for_each_entry(__job, &sched->job.list.head, entry) { in bind_validate_map_sparse()
1026 list_for_each_op(op, &bind_job->ops) { in bind_validate_map_sparse()
1027 if (op->op == OP_UNMAP) { in bind_validate_map_sparse()
1028 u64 op_addr = op->va.addr; in bind_validate_map_sparse()
1029 u64 op_end = op_addr + op->va.range; in bind_validate_map_sparse()
1031 if (!(end <= op_addr || addr >= op_end)) { in bind_validate_map_sparse()
1033 spin_unlock(&sched->job.list.lock); in bind_validate_map_sparse()
1034 wait_for_completion(&bind_job->complete); in bind_validate_map_sparse()
1041 spin_unlock(&sched->job.list.lock); in bind_validate_map_sparse()
1045 bind_validate_map_common(struct nouveau_job *job, u64 addr, u64 range, in bind_validate_map_common() argument
1048 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli); in bind_validate_map_common()
1049 struct nouveau_uvma_region *reg; in bind_validate_map_common() local
1051 u64 end = addr + range; in bind_validate_map_common()
1055 reg = nouveau_uvma_region_find_first(uvmm, addr, range); in bind_validate_map_common()
1056 if (!reg) { in bind_validate_map_common()
1064 if (reg->dirty) { in bind_validate_map_common()
1065 nouveau_uvma_region_get(reg); in bind_validate_map_common()
1067 wait_for_completion(&reg->complete); in bind_validate_map_common()
1068 nouveau_uvma_region_put(reg); in bind_validate_map_common()
1074 return -ENOSPC; in bind_validate_map_common()
1076 reg_addr = reg->va.addr; in bind_validate_map_common()
1077 reg_end = reg_addr + reg->va.range; in bind_validate_map_common()
1082 if (reg_addr > addr || reg_end < end) in bind_validate_map_common()
1083 return -ENOSPC; in bind_validate_map_common()
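
Two interval predicates carry the validation logic here: bind_validate_map_sparse() treats a request as colliding with a pending unmap when the half-open ranges overlap, i.e. !(end <= op_addr || addr >= op_end), and bind_validate_map_common() requires the request to be fully contained in the region it landed in, failing with -ENOSPC when reg_addr > addr || reg_end < end. Both predicates in a small compilable form:

#include <assert.h>
#include <stdint.h>

/* do the half-open intervals [a_start, a_end) and [b_start, b_end) overlap? */
static int ranges_overlap(uint64_t a_start, uint64_t a_end,
			  uint64_t b_start, uint64_t b_end)
{
	return !(a_end <= b_start || a_start >= b_end);
}

/* is [addr, end) fully inside [reg_addr, reg_end)? */
static int range_contained(uint64_t addr, uint64_t end,
			   uint64_t reg_addr, uint64_t reg_end)
{
	return reg_addr <= addr && reg_end >= end;
}

int main(void)
{
	assert(ranges_overlap(0x1000, 0x3000, 0x2000, 0x4000));   /* partial overlap */
	assert(!ranges_overlap(0x1000, 0x2000, 0x2000, 0x3000));  /* touching ends do not overlap */

	assert(range_contained(0x2000, 0x3000, 0x1000, 0x4000));  /* fits inside the region */
	assert(!range_contained(0x0800, 0x3000, 0x1000, 0x4000)); /* starts before the region */
	return 0;
}
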
1095 list_for_each_op(op, &bind_job->ops) { in bind_validate_region()
1096 u64 op_addr = op->va.addr; in bind_validate_region()
1097 u64 op_range = op->va.range; in bind_validate_region()
1100 switch (op->op) { in bind_validate_region()
1122 struct nouveau_uvma_prealloc *new = &bop->new; in bind_link_gpuvas()
1123 struct drm_gpuvm_bo *vm_bo = bop->vm_bo; in bind_link_gpuvas()
1124 struct drm_gpuva_ops *ops = bop->ops; in bind_link_gpuvas()
1128 switch (op->op) { in bind_link_gpuvas()
1130 drm_gpuva_link(&new->map->va, vm_bo); in bind_link_gpuvas()
1133 struct drm_gpuva *va = op->remap.unmap->va; in bind_link_gpuvas()
1135 if (op->remap.prev) in bind_link_gpuvas()
1136 drm_gpuva_link(&new->prev->va, va->vm_bo); in bind_link_gpuvas()
1137 if (op->remap.next) in bind_link_gpuvas()
1138 drm_gpuva_link(&new->next->va, va->vm_bo); in bind_link_gpuvas()
1143 drm_gpuva_unlink(op->unmap.va); in bind_link_gpuvas()
1159 list_for_each_op(op, &bind_job->ops) { in bind_lock_validate()
1162 if (!op->ops) in bind_lock_validate()
1165 drm_gpuva_for_each_op(va_op, op->ops) { in bind_lock_validate()
1178 if (va_op->op == DRM_GPUVA_OP_UNMAP) in bind_lock_validate()
1195 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli); in nouveau_uvmm_bind_job_submit()
1197 struct drm_exec *exec = &vme->exec; in nouveau_uvmm_bind_job_submit()
1201 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_submit()
1202 if (op->op == OP_MAP) { in nouveau_uvmm_bind_job_submit()
1203 struct drm_gem_object *obj = op->gem.obj = in nouveau_uvmm_bind_job_submit()
1204 drm_gem_object_lookup(job->file_priv, in nouveau_uvmm_bind_job_submit()
1205 op->gem.handle); in nouveau_uvmm_bind_job_submit()
1207 return -ENOENT; in nouveau_uvmm_bind_job_submit()
1209 dma_resv_lock(obj->resv, NULL); in nouveau_uvmm_bind_job_submit()
1210 op->vm_bo = drm_gpuvm_bo_obtain(&uvmm->base, obj); in nouveau_uvmm_bind_job_submit()
1211 dma_resv_unlock(obj->resv); in nouveau_uvmm_bind_job_submit()
1212 if (IS_ERR(op->vm_bo)) in nouveau_uvmm_bind_job_submit()
1213 return PTR_ERR(op->vm_bo); in nouveau_uvmm_bind_job_submit()
1215 drm_gpuvm_bo_extobj_add(op->vm_bo); in nouveau_uvmm_bind_job_submit()
1239 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_submit()
1240 switch (op->op) { in nouveau_uvmm_bind_job_submit()
1243 op->va.addr, in nouveau_uvmm_bind_job_submit()
1244 op->va.range); in nouveau_uvmm_bind_job_submit()
1250 op->reg = nouveau_uvma_region_find(uvmm, op->va.addr, in nouveau_uvmm_bind_job_submit()
1251 op->va.range); in nouveau_uvmm_bind_job_submit()
1252 if (!op->reg || op->reg->dirty) { in nouveau_uvmm_bind_job_submit()
1253 ret = -ENOENT; in nouveau_uvmm_bind_job_submit()
1257 op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->base, in nouveau_uvmm_bind_job_submit()
1258 op->va.addr, in nouveau_uvmm_bind_job_submit()
1259 op->va.range); in nouveau_uvmm_bind_job_submit()
1260 if (IS_ERR(op->ops)) { in nouveau_uvmm_bind_job_submit()
1261 ret = PTR_ERR(op->ops); in nouveau_uvmm_bind_job_submit()
1265 ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1266 op->ops); in nouveau_uvmm_bind_job_submit()
1268 drm_gpuva_ops_free(&uvmm->base, op->ops); in nouveau_uvmm_bind_job_submit()
1269 op->ops = NULL; in nouveau_uvmm_bind_job_submit()
1270 op->reg = NULL; in nouveau_uvmm_bind_job_submit()
1274 nouveau_uvma_region_dirty(op->reg); in nouveau_uvmm_bind_job_submit()
1278 struct nouveau_uvma_region *reg; in nouveau_uvmm_bind_job_submit() local
1280 reg = nouveau_uvma_region_find_first(uvmm, in nouveau_uvmm_bind_job_submit()
1281 op->va.addr, in nouveau_uvmm_bind_job_submit()
1282 op->va.range); in nouveau_uvmm_bind_job_submit()
1283 if (reg) { in nouveau_uvmm_bind_job_submit()
1284 u64 reg_addr = reg->va.addr; in nouveau_uvmm_bind_job_submit()
1285 u64 reg_end = reg_addr + reg->va.range; in nouveau_uvmm_bind_job_submit()
1286 u64 op_addr = op->va.addr; in nouveau_uvmm_bind_job_submit()
1287 u64 op_end = op_addr + op->va.range; in nouveau_uvmm_bind_job_submit()
1289 if (unlikely(reg->dirty)) { in nouveau_uvmm_bind_job_submit()
1290 ret = -EINVAL; in nouveau_uvmm_bind_job_submit()
1298 ret = -ENOSPC; in nouveau_uvmm_bind_job_submit()
1303 op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base, in nouveau_uvmm_bind_job_submit()
1304 op->va.addr, in nouveau_uvmm_bind_job_submit()
1305 op->va.range, in nouveau_uvmm_bind_job_submit()
1306 op->gem.obj, in nouveau_uvmm_bind_job_submit()
1307 op->gem.offset); in nouveau_uvmm_bind_job_submit()
1308 if (IS_ERR(op->ops)) { in nouveau_uvmm_bind_job_submit()
1309 ret = PTR_ERR(op->ops); in nouveau_uvmm_bind_job_submit()
1313 ret = nouveau_uvmm_sm_map_prepare(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1314 reg, op->ops, in nouveau_uvmm_bind_job_submit()
1315 op->va.addr, in nouveau_uvmm_bind_job_submit()
1316 op->va.range, in nouveau_uvmm_bind_job_submit()
1317 op->flags & 0xff); in nouveau_uvmm_bind_job_submit()
1319 drm_gpuva_ops_free(&uvmm->base, op->ops); in nouveau_uvmm_bind_job_submit()
1320 op->ops = NULL; in nouveau_uvmm_bind_job_submit()
1327 op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->base, in nouveau_uvmm_bind_job_submit()
1328 op->va.addr, in nouveau_uvmm_bind_job_submit()
1329 op->va.range); in nouveau_uvmm_bind_job_submit()
1330 if (IS_ERR(op->ops)) { in nouveau_uvmm_bind_job_submit()
1331 ret = PTR_ERR(op->ops); in nouveau_uvmm_bind_job_submit()
1335 ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1336 op->ops); in nouveau_uvmm_bind_job_submit()
1338 drm_gpuva_ops_free(&uvmm->base, op->ops); in nouveau_uvmm_bind_job_submit()
1339 op->ops = NULL; in nouveau_uvmm_bind_job_submit()
1345 ret = -EINVAL; in nouveau_uvmm_bind_job_submit()
1350 drm_exec_init(exec, vme->flags, 0); in nouveau_uvmm_bind_job_submit()
1352 ret = bind_lock_validate(job, exec, vme->num_fences); in nouveau_uvmm_bind_job_submit()
1355 op = list_last_op(&bind_job->ops); in nouveau_uvmm_bind_job_submit()
1380 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_submit()
1381 switch (op->op) { in nouveau_uvmm_bind_job_submit()
1398 list_for_each_op_from_reverse(op, &bind_job->ops) { in nouveau_uvmm_bind_job_submit()
1399 switch (op->op) { in nouveau_uvmm_bind_job_submit()
1401 nouveau_uvma_region_destroy(uvmm, op->va.addr, in nouveau_uvmm_bind_job_submit()
1402 op->va.range); in nouveau_uvmm_bind_job_submit()
1405 __nouveau_uvma_region_insert(uvmm, op->reg); in nouveau_uvmm_bind_job_submit()
1406 nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1407 op->ops); in nouveau_uvmm_bind_job_submit()
1410 nouveau_uvmm_sm_map_prepare_unwind(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1411 op->ops, in nouveau_uvmm_bind_job_submit()
1412 op->va.addr, in nouveau_uvmm_bind_job_submit()
1413 op->va.range); in nouveau_uvmm_bind_job_submit()
1416 nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1417 op->ops); in nouveau_uvmm_bind_job_submit()
1421 drm_gpuva_ops_free(&uvmm->base, op->ops); in nouveau_uvmm_bind_job_submit()
1422 op->ops = NULL; in nouveau_uvmm_bind_job_submit()
1423 op->reg = NULL; in nouveau_uvmm_bind_job_submit()
1435 drm_gpuvm_exec_resv_add_fence(vme, job->done_fence, in nouveau_uvmm_bind_job_armed_submit()
1436 job->resv_usage, job->resv_usage); in nouveau_uvmm_bind_job_armed_submit()
1444 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli); in nouveau_uvmm_bind_job_run()
1448 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_run()
1449 switch (op->op) { in nouveau_uvmm_bind_job_run()
1454 ret = nouveau_uvmm_sm_map(uvmm, &op->new, op->ops); in nouveau_uvmm_bind_job_run()
1461 ret = nouveau_uvmm_sm_unmap(uvmm, &op->new, op->ops); in nouveau_uvmm_bind_job_run()
1470 NV_PRINTK(err, job->cli, "bind job failed: %d\n", ret); in nouveau_uvmm_bind_job_run()
1478 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli); in nouveau_uvmm_bind_job_cleanup()
1481 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_cleanup()
1482 struct drm_gem_object *obj = op->gem.obj; in nouveau_uvmm_bind_job_cleanup()
1484 /* When nouveau_uvmm_bind_job_submit() fails op->ops and op->reg in nouveau_uvmm_bind_job_cleanup()
1487 switch (op->op) { in nouveau_uvmm_bind_job_cleanup()
1492 if (!IS_ERR_OR_NULL(op->ops)) in nouveau_uvmm_bind_job_cleanup()
1493 nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new, in nouveau_uvmm_bind_job_cleanup()
1494 op->ops); in nouveau_uvmm_bind_job_cleanup()
1496 if (op->reg) { in nouveau_uvmm_bind_job_cleanup()
1497 nouveau_uvma_region_sparse_unref(op->reg); in nouveau_uvmm_bind_job_cleanup()
1499 nouveau_uvma_region_remove(op->reg); in nouveau_uvmm_bind_job_cleanup()
1501 nouveau_uvma_region_complete(op->reg); in nouveau_uvmm_bind_job_cleanup()
1502 nouveau_uvma_region_put(op->reg); in nouveau_uvmm_bind_job_cleanup()
1507 if (!IS_ERR_OR_NULL(op->ops)) in nouveau_uvmm_bind_job_cleanup()
1508 nouveau_uvmm_sm_map_cleanup(uvmm, &op->new, in nouveau_uvmm_bind_job_cleanup()
1509 op->ops); in nouveau_uvmm_bind_job_cleanup()
1512 if (!IS_ERR_OR_NULL(op->ops)) in nouveau_uvmm_bind_job_cleanup()
1513 nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new, in nouveau_uvmm_bind_job_cleanup()
1514 op->ops); in nouveau_uvmm_bind_job_cleanup()
1518 if (!IS_ERR_OR_NULL(op->ops)) in nouveau_uvmm_bind_job_cleanup()
1519 drm_gpuva_ops_free(&uvmm->base, op->ops); in nouveau_uvmm_bind_job_cleanup()
1521 if (!IS_ERR_OR_NULL(op->vm_bo)) { in nouveau_uvmm_bind_job_cleanup()
1522 dma_resv_lock(obj->resv, NULL); in nouveau_uvmm_bind_job_cleanup()
1523 drm_gpuvm_bo_put(op->vm_bo); in nouveau_uvmm_bind_job_cleanup()
1524 dma_resv_unlock(obj->resv); in nouveau_uvmm_bind_job_cleanup()
1532 complete_all(&bind_job->complete); in nouveau_uvmm_bind_job_cleanup()
1552 return -ENOMEM; in bind_job_op_from_uop()
1554 switch (uop->op) { in bind_job_op_from_uop()
1556 op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ? in bind_job_op_from_uop()
1560 op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ? in bind_job_op_from_uop()
1564 op->op = uop->op; in bind_job_op_from_uop()
1568 op->flags = uop->flags; in bind_job_op_from_uop()
1569 op->va.addr = uop->addr; in bind_job_op_from_uop()
1570 op->va.range = uop->range; in bind_job_op_from_uop()
1571 op->gem.handle = uop->handle; in bind_job_op_from_uop()
1572 op->gem.offset = uop->bo_offset; in bind_job_op_from_uop()
1583 list_del(&op->entry); in bind_job_ops_free()
1601 INIT_LIST_HEAD(&job->ops); in nouveau_uvmm_bind_job_init()
1603 for (i = 0; i < __args->op.count; i++) { in nouveau_uvmm_bind_job_init()
1604 ret = bind_job_op_from_uop(&op, &__args->op.s[i]); in nouveau_uvmm_bind_job_init()
1608 list_add_tail(&op->entry, &job->ops); in nouveau_uvmm_bind_job_init()
1611 init_completion(&job->complete); in nouveau_uvmm_bind_job_init()
1613 args.file_priv = __args->file_priv; in nouveau_uvmm_bind_job_init()
1615 args.sched = __args->sched; in nouveau_uvmm_bind_job_init()
1618 args.in_sync.count = __args->in_sync.count; in nouveau_uvmm_bind_job_init()
1619 args.in_sync.s = __args->in_sync.s; in nouveau_uvmm_bind_job_init()
1621 args.out_sync.count = __args->out_sync.count; in nouveau_uvmm_bind_job_init()
1622 args.out_sync.s = __args->out_sync.s; in nouveau_uvmm_bind_job_init()
1624 args.sync = !(__args->flags & DRM_NOUVEAU_VM_BIND_RUN_ASYNC); in nouveau_uvmm_bind_job_init()
1628 ret = nouveau_job_init(&job->base, &args); in nouveau_uvmm_bind_job_init()
1636 bind_job_ops_free(&job->ops); in nouveau_uvmm_bind_job_init()
1653 ret = nouveau_job_submit(&job->base); in nouveau_uvmm_vm_bind()
1660 nouveau_job_fini(&job->base); in nouveau_uvmm_vm_bind()
1669 u32 inc = req->wait_count; in nouveau_uvmm_vm_bind_ucopy()
1670 u64 ins = req->wait_ptr; in nouveau_uvmm_vm_bind_ucopy()
1671 u32 outc = req->sig_count; in nouveau_uvmm_vm_bind_ucopy()
1672 u64 outs = req->sig_ptr; in nouveau_uvmm_vm_bind_ucopy()
1673 u32 opc = req->op_count; in nouveau_uvmm_vm_bind_ucopy()
1674 u64 ops = req->op_ptr; in nouveau_uvmm_vm_bind_ucopy()
1677 args->flags = req->flags; in nouveau_uvmm_vm_bind_ucopy()
1680 args->op.count = opc; in nouveau_uvmm_vm_bind_ucopy()
1681 args->op.s = u_memcpya(ops, opc, in nouveau_uvmm_vm_bind_ucopy()
1682 sizeof(*args->op.s)); in nouveau_uvmm_vm_bind_ucopy()
1683 if (IS_ERR(args->op.s)) in nouveau_uvmm_vm_bind_ucopy()
1684 return PTR_ERR(args->op.s); in nouveau_uvmm_vm_bind_ucopy()
1688 s = &args->in_sync.s; in nouveau_uvmm_vm_bind_ucopy()
1690 args->in_sync.count = inc; in nouveau_uvmm_vm_bind_ucopy()
1699 s = &args->out_sync.s; in nouveau_uvmm_vm_bind_ucopy()
1701 args->out_sync.count = outc; in nouveau_uvmm_vm_bind_ucopy()
1712 u_free(args->op.s); in nouveau_uvmm_vm_bind_ucopy()
1714 u_free(args->in_sync.s); in nouveau_uvmm_vm_bind_ucopy()
1721 u_free(args->op.s); in nouveau_uvmm_vm_bind_ufree()
1722 u_free(args->in_sync.s); in nouveau_uvmm_vm_bind_ufree()
1723 u_free(args->out_sync.s); in nouveau_uvmm_vm_bind_ufree()
1737 return -ENOSYS; in nouveau_uvmm_ioctl_vm_bind()
1743 args.sched = cli->sched; in nouveau_uvmm_ioctl_vm_bind()
1758 struct drm_gem_object *obj = &nvbo->bo.base; in nouveau_uvmm_bo_map_all()
1762 dma_resv_assert_held(obj->resv); in nouveau_uvmm_bo_map_all()
1777 struct drm_gem_object *obj = &nvbo->bo.base; in nouveau_uvmm_bo_unmap_all()
1781 dma_resv_assert_held(obj->resv); in nouveau_uvmm_bo_unmap_all()
1804 struct nouveau_bo *nvbo = nouveau_gem_object(vm_bo->obj); in nouveau_uvmm_bo_validate()
1806 nouveau_bo_placement_set(nvbo, nvbo->valid_domains, 0); in nouveau_uvmm_bo_validate()
1822 struct drm_device *drm = cli->drm->dev; in nouveau_uvmm_ioctl_vm_init()
1828 if (check_add_overflow(init->kernel_managed_addr, in nouveau_uvmm_ioctl_vm_init()
1829 init->kernel_managed_size, in nouveau_uvmm_ioctl_vm_init()
1831 return -EINVAL; in nouveau_uvmm_ioctl_vm_init()
1834 return -EINVAL; in nouveau_uvmm_ioctl_vm_init()
1836 mutex_lock(&cli->mutex); in nouveau_uvmm_ioctl_vm_init()
1838 if (unlikely(cli->uvmm.disabled)) { in nouveau_uvmm_ioctl_vm_init()
1839 ret = -ENOSYS; in nouveau_uvmm_ioctl_vm_init()
1845 ret = -ENOMEM; in nouveau_uvmm_ioctl_vm_init()
1852 ret = -ENOMEM; in nouveau_uvmm_ioctl_vm_init()
1856 mutex_init(&uvmm->mutex); in nouveau_uvmm_ioctl_vm_init()
1857 mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN); in nouveau_uvmm_ioctl_vm_init()
1858 mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex); in nouveau_uvmm_ioctl_vm_init()
1860 drm_gpuvm_init(&uvmm->base, cli->name, 0, drm, r_obj, in nouveau_uvmm_ioctl_vm_init()
1863 init->kernel_managed_addr, in nouveau_uvmm_ioctl_vm_init()
1864 init->kernel_managed_size, in nouveau_uvmm_ioctl_vm_init()
1869 ret = nvif_vmm_ctor(&cli->mmu, "uvmm", in nouveau_uvmm_ioctl_vm_init()
1870 cli->vmm.vmm.object.oclass, RAW, in nouveau_uvmm_ioctl_vm_init()
1871 init->kernel_managed_addr, in nouveau_uvmm_ioctl_vm_init()
1872 init->kernel_managed_size, in nouveau_uvmm_ioctl_vm_init()
1873 NULL, 0, &uvmm->vmm.vmm); in nouveau_uvmm_ioctl_vm_init()
1877 uvmm->vmm.cli = cli; in nouveau_uvmm_ioctl_vm_init()
1878 cli->uvmm.ptr = uvmm; in nouveau_uvmm_ioctl_vm_init()
1879 mutex_unlock(&cli->mutex); in nouveau_uvmm_ioctl_vm_init()
1884 drm_gpuvm_put(&uvmm->base); in nouveau_uvmm_ioctl_vm_init()
1886 mutex_unlock(&cli->mutex); in nouveau_uvmm_ioctl_vm_init()
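
The VM_INIT path first makes sure the kernel-managed window does not wrap: check_add_overflow() from <linux/overflow.h> adds kernel_managed_addr and kernel_managed_size and reports whether the u64 addition overflowed, in which case the ioctl returns -EINVAL. On current compilers that helper boils down to __builtin_add_overflow, so the same guard can be exercised in plain C:

#include <assert.h>
#include <stdint.h>

/* returns nonzero when addr + size wraps around a u64 */
static int add_overflows_u64(uint64_t addr, uint64_t size, uint64_t *end)
{
	return __builtin_add_overflow(addr, size, end);
}

int main(void)
{
	uint64_t end;

	assert(!add_overflows_u64(0x100000000ULL, 0x1000ULL, &end) &&
	       end == 0x100001000ULL);
	assert(add_overflows_u64(UINT64_MAX, 1, &end));	/* wraps: must be rejected */
	return 0;
}
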
1893 MA_STATE(mas, &uvmm->region_mt, 0, 0); in nouveau_uvmm_fini()
1894 struct nouveau_uvma_region *reg; in nouveau_uvmm_fini() local
1895 struct nouveau_cli *cli = uvmm->vmm.cli; in nouveau_uvmm_fini()
1899 drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) { in nouveau_uvmm_fini()
1901 struct drm_gem_object *obj = va->gem.obj; in nouveau_uvmm_fini()
1903 if (unlikely(va == &uvmm->base.kernel_alloc_node)) in nouveau_uvmm_fini()
1908 dma_resv_lock(obj->resv, NULL); in nouveau_uvmm_fini()
1910 dma_resv_unlock(obj->resv); in nouveau_uvmm_fini()
1919 mas_for_each(&mas, reg, ULONG_MAX) { in nouveau_uvmm_fini()
1921 nouveau_uvma_region_sparse_unref(reg); in nouveau_uvmm_fini()
1922 nouveau_uvma_region_put(reg); in nouveau_uvmm_fini()
1925 WARN(!mtree_empty(&uvmm->region_mt), in nouveau_uvmm_fini()
1927 __mt_destroy(&uvmm->region_mt); in nouveau_uvmm_fini()
1930 mutex_lock(&cli->mutex); in nouveau_uvmm_fini()
1931 nouveau_vmm_fini(&uvmm->vmm); in nouveau_uvmm_fini()
1932 drm_gpuvm_put(&uvmm->base); in nouveau_uvmm_fini()
1933 mutex_unlock(&cli->mutex); in nouveau_uvmm_fini()