Lines Matching +full:dma +full:safe +full:map
1 // SPDX-License-Identifier: MIT
12 * Actual map/unmap operations within the fence signalling critical path are
13 * protected by installing DMA fences to the corresponding GEMs' DMA
15 * list in order to map/unmap its entries, can't occur concurrently.
20 * the corresponding GEMs' DMA reservation fence.
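
The header comment quoted above describes how the asynchronous map/unmap path is serialized: a fence is installed into each backing GEM's DMA reservation before the bind job runs. As a rough, hedged illustration only (this is not code from the matched file; obj and done_fence are placeholders, and the usage value is an assumption, the driver picks it per job via job->resv_usage further down), the underlying dma_resv pattern looks like:

    /* Sketch: order later moves/evictions of a backing GEM after the
     * asynchronous map/unmap by installing the job's fence into the GEM's
     * DMA reservation. Placeholder names; usage value assumed.
     */
    dma_resv_lock(obj->resv, NULL);
    ret = dma_resv_reserve_fences(obj->resv, 1);
    if (!ret)
            dma_resv_add_fence(obj->resv, done_fence, DMA_RESV_USAGE_BOOKKEEP);
    dma_resv_unlock(obj->resv);
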
54 struct nouveau_uvma *map; member
94 struct nvif_vmm *vmm = &uvmm->vmm.vmm; in nouveau_uvmm_vmm_sparse_ref()
103 struct nvif_vmm *vmm = &uvmm->vmm.vmm; in nouveau_uvmm_vmm_sparse_unref()
112 struct nvif_vmm *vmm = &uvmm->vmm.vmm; in nouveau_uvmm_vmm_get()
121 struct nvif_vmm *vmm = &uvmm->vmm.vmm; in nouveau_uvmm_vmm_put()
130 struct nvif_vmm *vmm = &uvmm->vmm.vmm; in nouveau_uvmm_vmm_unmap()
141 struct nvif_vmm *vmm = &uvmm->vmm.vmm; in nouveau_uvmm_vmm_map()
147 switch (vmm->object.oclass) { in nouveau_uvmm_vmm_map()
152 if (mem->mem.type & NVIF_MEM_VRAM) in nouveau_uvmm_vmm_map()
163 return -ENOSYS; in nouveau_uvmm_vmm_map()
168 &mem->mem, bo_offset); in nouveau_uvmm_vmm_map()
174 u64 addr = reg->va.addr; in nouveau_uvma_region_sparse_unref()
175 u64 range = reg->va.range; in nouveau_uvma_region_sparse_unref()
177 return nouveau_uvmm_vmm_sparse_unref(reg->uvmm, addr, range); in nouveau_uvma_region_sparse_unref()
183 u64 addr = uvma->va.va.addr; in nouveau_uvma_vmm_put()
184 u64 range = uvma->va.va.range; in nouveau_uvma_vmm_put()
193 u64 addr = uvma->va.va.addr; in nouveau_uvma_map()
194 u64 offset = uvma->va.gem.offset; in nouveau_uvma_map()
195 u64 range = uvma->va.va.range; in nouveau_uvma_map()
198 offset, uvma->kind, mem); in nouveau_uvma_map()
204 u64 addr = uvma->va.va.addr; in nouveau_uvma_unmap()
205 u64 range = uvma->va.va.range; in nouveau_uvma_unmap()
206 bool sparse = !!uvma->region; in nouveau_uvma_unmap()
208 if (drm_gpuva_invalidated(&uvma->va)) in nouveau_uvma_unmap()
219 return -ENOMEM; in nouveau_uvma_alloc()
233 drm_gem_object_get(uvma->va.gem.obj); in nouveau_uvma_gem_get()
239 drm_gem_object_put(uvma->va.gem.obj); in nouveau_uvma_gem_put()
247 return -ENOMEM; in nouveau_uvma_region_alloc()
249 kref_init(&(*preg)->kref); in nouveau_uvma_region_alloc()
266 kref_get(&reg->kref); in nouveau_uvma_region_get()
272 kref_put(&reg->kref, nouveau_uvma_region_free); in nouveau_uvma_region_put()
279 u64 addr = reg->va.addr; in __nouveau_uvma_region_insert()
280 u64 range = reg->va.range; in __nouveau_uvma_region_insert()
281 u64 last = addr + range - 1; in __nouveau_uvma_region_insert()
282 MA_STATE(mas, &uvmm->region_mt, addr, addr); in __nouveau_uvma_region_insert()
285 return -EEXIST; in __nouveau_uvma_region_insert()
288 return -EEXIST; in __nouveau_uvma_region_insert()
295 reg->uvmm = uvmm; in __nouveau_uvma_region_insert()
307 reg->uvmm = uvmm; in nouveau_uvma_region_insert()
308 reg->va.addr = addr; in nouveau_uvma_region_insert()
309 reg->va.range = range; in nouveau_uvma_region_insert()
321 struct nouveau_uvmm *uvmm = reg->uvmm; in nouveau_uvma_region_remove()
322 MA_STATE(mas, &uvmm->region_mt, reg->va.addr, 0); in nouveau_uvma_region_remove()
334 if (!drm_gpuvm_interval_empty(&uvmm->base, addr, range)) in nouveau_uvma_region_create()
335 return -ENOSPC; in nouveau_uvma_region_create()
362 MA_STATE(mas, &uvmm->region_mt, addr, 0); in nouveau_uvma_region_find_first()
364 return mas_find(&mas, addr + range - 1); in nouveau_uvma_region_find_first()
377 if (reg->va.addr != addr || in nouveau_uvma_region_find()
378 reg->va.range != range) in nouveau_uvma_region_find()
387 struct nouveau_uvmm *uvmm = reg->uvmm; in nouveau_uvma_region_empty()
389 return drm_gpuvm_interval_empty(&uvmm->base, in nouveau_uvma_region_empty()
390 reg->va.addr, in nouveau_uvma_region_empty()
391 reg->va.range); in nouveau_uvma_region_empty()
397 struct nouveau_uvmm *uvmm = reg->uvmm; in __nouveau_uvma_region_destroy()
398 u64 addr = reg->va.addr; in __nouveau_uvma_region_destroy()
399 u64 range = reg->va.range; in __nouveau_uvma_region_destroy()
402 return -EBUSY; in __nouveau_uvma_region_destroy()
419 return -ENOENT; in nouveau_uvma_region_destroy()
428 init_completion(&reg->complete); in nouveau_uvma_region_dirty()
429 reg->dirty = true; in nouveau_uvma_region_dirty()
435 complete_all(&reg->complete); in nouveau_uvma_region_complete()
441 struct drm_gpuva *va = &uvma->va; in op_map_prepare_unwind()
450 drm_gpuva_insert(va->vm, va); in op_unmap_prepare_unwind()
461 u64 vmm_get_start = args ? args->addr : 0; in nouveau_uvmm_sm_prepare_unwind()
462 u64 vmm_get_end = args ? args->addr + args->range : 0; in nouveau_uvmm_sm_prepare_unwind()
466 switch (op->op) { in nouveau_uvmm_sm_prepare_unwind()
468 op_map_prepare_unwind(new->map); in nouveau_uvmm_sm_prepare_unwind()
471 struct drm_gpuva_op_remap *r = &op->remap; in nouveau_uvmm_sm_prepare_unwind()
472 struct drm_gpuva *va = r->unmap->va; in nouveau_uvmm_sm_prepare_unwind()
474 if (r->next) in nouveau_uvmm_sm_prepare_unwind()
475 op_map_prepare_unwind(new->next); in nouveau_uvmm_sm_prepare_unwind()
477 if (r->prev) in nouveau_uvmm_sm_prepare_unwind()
478 op_map_prepare_unwind(new->prev); in nouveau_uvmm_sm_prepare_unwind()
484 op_unmap_prepare_unwind(op->unmap.va); in nouveau_uvmm_sm_prepare_unwind()
498 switch (op->op) { in nouveau_uvmm_sm_prepare_unwind()
500 u64 vmm_get_range = vmm_get_end - vmm_get_start; in nouveau_uvmm_sm_prepare_unwind()
508 struct drm_gpuva_op_remap *r = &op->remap; in nouveau_uvmm_sm_prepare_unwind()
509 struct drm_gpuva *va = r->unmap->va; in nouveau_uvmm_sm_prepare_unwind()
510 u64 ustart = va->va.addr; in nouveau_uvmm_sm_prepare_unwind()
511 u64 urange = va->va.range; in nouveau_uvmm_sm_prepare_unwind()
514 if (r->prev) in nouveau_uvmm_sm_prepare_unwind()
517 if (r->next) in nouveau_uvmm_sm_prepare_unwind()
520 if (r->prev && r->next) in nouveau_uvmm_sm_prepare_unwind()
526 struct drm_gpuva_op_unmap *u = &op->unmap; in nouveau_uvmm_sm_prepare_unwind()
527 struct drm_gpuva *va = u->va; in nouveau_uvmm_sm_prepare_unwind()
528 u64 ustart = va->va.addr; in nouveau_uvmm_sm_prepare_unwind()
529 u64 urange = va->va.range; in nouveau_uvmm_sm_prepare_unwind()
538 u64 vmm_get_range = ustart - vmm_get_start; in nouveau_uvmm_sm_prepare_unwind()
593 uvma->region = args->region; in op_map_prepare()
594 uvma->kind = args->kind; in op_map_prepare()
596 drm_gpuva_map(&uvmm->base, &uvma->va, op); in op_map_prepare()
612 * Note: @args should not be NULL when calling for a map operation.
621 u64 vmm_get_start = args ? args->addr : 0; in nouveau_uvmm_sm_prepare()
622 u64 vmm_get_end = args ? args->addr + args->range : 0; in nouveau_uvmm_sm_prepare()
626 switch (op->op) { in nouveau_uvmm_sm_prepare()
628 u64 vmm_get_range = vmm_get_end - vmm_get_start; in nouveau_uvmm_sm_prepare()
630 ret = op_map_prepare(uvmm, &new->map, &op->map, args); in nouveau_uvmm_sm_prepare()
638 op_map_prepare_unwind(new->map); in nouveau_uvmm_sm_prepare()
646 struct drm_gpuva_op_remap *r = &op->remap; in nouveau_uvmm_sm_prepare()
647 struct drm_gpuva *va = r->unmap->va; in nouveau_uvmm_sm_prepare()
649 .kind = uvma_from_va(va)->kind, in nouveau_uvmm_sm_prepare()
650 .region = uvma_from_va(va)->region, in nouveau_uvmm_sm_prepare()
652 u64 ustart = va->va.addr; in nouveau_uvmm_sm_prepare()
653 u64 urange = va->va.range; in nouveau_uvmm_sm_prepare()
656 op_unmap_prepare(r->unmap); in nouveau_uvmm_sm_prepare()
658 if (r->prev) { in nouveau_uvmm_sm_prepare()
659 ret = op_map_prepare(uvmm, &new->prev, r->prev, in nouveau_uvmm_sm_prepare()
668 if (r->next) { in nouveau_uvmm_sm_prepare()
669 ret = op_map_prepare(uvmm, &new->next, r->next, in nouveau_uvmm_sm_prepare()
672 if (r->prev) in nouveau_uvmm_sm_prepare()
673 op_map_prepare_unwind(new->prev); in nouveau_uvmm_sm_prepare()
681 if (args && (r->prev && r->next)) in nouveau_uvmm_sm_prepare()
687 struct drm_gpuva_op_unmap *u = &op->unmap; in nouveau_uvmm_sm_prepare()
688 struct drm_gpuva *va = u->va; in nouveau_uvmm_sm_prepare()
689 u64 ustart = va->va.addr; in nouveau_uvmm_sm_prepare()
690 u64 urange = va->va.range; in nouveau_uvmm_sm_prepare()
704 u64 vmm_get_range = ustart - vmm_get_start; in nouveau_uvmm_sm_prepare()
718 ret = -EINVAL; in nouveau_uvmm_sm_prepare()
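
The note at line 612 above ("@args should not be NULL when calling for a map operation") is what distinguishes the two prepare paths referenced later in this listing, nouveau_uvmm_sm_map_prepare() and nouveau_uvmm_sm_unmap_prepare(). A hedged sketch of that split; the argument struct name and exact parameter list are assumptions, while the fields (region, addr, range, kind) are the ones consumed via args-> in the matches above:

    /* map: a populated args struct is mandatory (assumed layout) */
    struct uvmm_map_args args = {
            .region = region,
            .addr = addr,
            .range = range,
            .kind = kind,
    };
    ret = nouveau_uvmm_sm_prepare(uvmm, new, ops, &args);

    /* unmap: no new mapping is created, so passing NULL is fine */
    ret = nouveau_uvmm_sm_prepare(uvmm, new, ops, NULL);
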
761 switch (op->op) { in op_gem_obj()
763 return op->map.gem.obj; in op_gem_obj()
769 return op->remap.unmap->va->gem.obj; in op_gem_obj()
771 return op->unmap.va->gem.obj; in op_gem_obj()
781 struct nouveau_bo *nvbo = nouveau_gem_object(uvma->va.gem.obj); in op_map()
783 nouveau_uvma_map(uvma, nouveau_mem(nvbo->bo.resource)); in op_map()
789 struct drm_gpuva *va = u->va; in op_unmap()
793 if (!u->keep) in op_unmap()
801 struct nouveau_uvma *uvma = uvma_from_va(u->va); in op_unmap_range()
802 bool sparse = !!uvma->region; in op_unmap_range()
804 if (!drm_gpuva_invalidated(u->va)) in op_unmap_range()
812 struct drm_gpuva_op_unmap *u = r->unmap; in op_remap()
813 struct nouveau_uvma *uvma = uvma_from_va(u->va); in op_remap()
814 u64 addr = uvma->va.va.addr; in op_remap()
815 u64 end = uvma->va.va.addr + uvma->va.va.range; in op_remap()
817 if (r->prev) in op_remap()
818 addr = r->prev->va.addr + r->prev->va.range; in op_remap()
820 if (r->next) in op_remap()
821 end = r->next->va.addr; in op_remap()
823 op_unmap_range(u, addr, end - addr); in op_remap()
834 switch (op->op) { in nouveau_uvmm_sm()
836 op_map(new->map); in nouveau_uvmm_sm()
839 op_remap(&op->remap, new); in nouveau_uvmm_sm()
842 op_unmap(&op->unmap); in nouveau_uvmm_sm()
876 switch (op->op) { in nouveau_uvmm_sm_cleanup()
880 struct drm_gpuva_op_remap *r = &op->remap; in nouveau_uvmm_sm_cleanup()
881 struct drm_gpuva_op_map *p = r->prev; in nouveau_uvmm_sm_cleanup()
882 struct drm_gpuva_op_map *n = r->next; in nouveau_uvmm_sm_cleanup()
883 struct drm_gpuva *va = r->unmap->va; in nouveau_uvmm_sm_cleanup()
887 u64 addr = va->va.addr; in nouveau_uvmm_sm_cleanup()
888 u64 end = addr + va->va.range; in nouveau_uvmm_sm_cleanup()
891 addr = p->va.addr + p->va.range; in nouveau_uvmm_sm_cleanup()
894 end = n->va.addr; in nouveau_uvmm_sm_cleanup()
896 nouveau_uvmm_vmm_put(uvmm, addr, end - addr); in nouveau_uvmm_sm_cleanup()
904 struct drm_gpuva_op_unmap *u = &op->unmap; in nouveau_uvmm_sm_cleanup()
905 struct drm_gpuva *va = u->va; in nouveau_uvmm_sm_cleanup()
941 return -EINVAL; in nouveau_uvmm_validate_range()
944 return -EINVAL; in nouveau_uvmm_validate_range()
946 if (!drm_gpuvm_range_valid(&uvmm->base, addr, range)) in nouveau_uvmm_validate_range()
947 return -EINVAL; in nouveau_uvmm_validate_range()
957 return -ENOMEM; in nouveau_uvmm_bind_job_alloc()
959 kref_init(&(*pjob)->kref); in nouveau_uvmm_bind_job_alloc()
971 list_for_each_op_safe(op, next, &job->ops) { in nouveau_uvmm_bind_job_free()
972 list_del(&op->entry); in nouveau_uvmm_bind_job_free()
976 nouveau_job_free(&job->base); in nouveau_uvmm_bind_job_free()
983 kref_get(&job->kref); in nouveau_uvmm_bind_job_get()
989 kref_put(&job->kref, nouveau_uvmm_bind_job_free); in nouveau_uvmm_bind_job_put()
996 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli); in bind_validate_op()
997 struct drm_gem_object *obj = op->gem.obj; in bind_validate_op()
999 if (op->op == OP_MAP) { in bind_validate_op()
1000 if (op->gem.offset & ~PAGE_MASK) in bind_validate_op()
1001 return -EINVAL; in bind_validate_op()
1003 if (obj->size <= op->gem.offset) in bind_validate_op()
1004 return -EINVAL; in bind_validate_op()
1006 if (op->va.range > (obj->size - op->gem.offset)) in bind_validate_op()
1007 return -EINVAL; in bind_validate_op()
1010 return nouveau_uvmm_validate_range(uvmm, op->va.addr, op->va.range); in bind_validate_op()
1016 struct nouveau_sched *sched = job->sched; in bind_validate_map_sparse()
1022 spin_lock(&sched->job.list.lock); in bind_validate_map_sparse()
1023 list_for_each_entry(__job, &sched->job.list.head, entry) { in bind_validate_map_sparse()
1026 list_for_each_op(op, &bind_job->ops) { in bind_validate_map_sparse()
1027 if (op->op == OP_UNMAP) { in bind_validate_map_sparse()
1028 u64 op_addr = op->va.addr; in bind_validate_map_sparse()
1029 u64 op_end = op_addr + op->va.range; in bind_validate_map_sparse()
1033 spin_unlock(&sched->job.list.lock); in bind_validate_map_sparse()
1034 wait_for_completion(&bind_job->complete); in bind_validate_map_sparse()
1041 spin_unlock(&sched->job.list.lock); in bind_validate_map_sparse()
1048 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli); in bind_validate_map_common()
1064 if (reg->dirty) { in bind_validate_map_common()
1067 wait_for_completion(&reg->complete); in bind_validate_map_common()
1074 return -ENOSPC; in bind_validate_map_common()
1076 reg_addr = reg->va.addr; in bind_validate_map_common()
1077 reg_end = reg_addr + reg->va.range; in bind_validate_map_common()
1083 return -ENOSPC; in bind_validate_map_common()
1095 list_for_each_op(op, &bind_job->ops) { in bind_validate_region()
1096 u64 op_addr = op->va.addr; in bind_validate_region()
1097 u64 op_range = op->va.range; in bind_validate_region()
1100 switch (op->op) { in bind_validate_region()
1122 struct nouveau_uvma_prealloc *new = &bop->new; in bind_link_gpuvas()
1123 struct drm_gpuvm_bo *vm_bo = bop->vm_bo; in bind_link_gpuvas()
1124 struct drm_gpuva_ops *ops = bop->ops; in bind_link_gpuvas()
1128 switch (op->op) { in bind_link_gpuvas()
1130 drm_gpuva_link(&new->map->va, vm_bo); in bind_link_gpuvas()
1133 struct drm_gpuva *va = op->remap.unmap->va; in bind_link_gpuvas()
1135 if (op->remap.prev) in bind_link_gpuvas()
1136 drm_gpuva_link(&new->prev->va, va->vm_bo); in bind_link_gpuvas()
1137 if (op->remap.next) in bind_link_gpuvas()
1138 drm_gpuva_link(&new->next->va, va->vm_bo); in bind_link_gpuvas()
1143 drm_gpuva_unlink(op->unmap.va); in bind_link_gpuvas()
1159 list_for_each_op(op, &bind_job->ops) { in bind_lock_validate()
1162 if (!op->ops) in bind_lock_validate()
1165 drm_gpuva_for_each_op(va_op, op->ops) { in bind_lock_validate()
1178 if (va_op->op == DRM_GPUVA_OP_UNMAP) in bind_lock_validate()
1195 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli); in nouveau_uvmm_bind_job_submit()
1197 struct drm_exec *exec = &vme->exec; in nouveau_uvmm_bind_job_submit()
1201 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_submit()
1202 if (op->op == OP_MAP) { in nouveau_uvmm_bind_job_submit()
1203 struct drm_gem_object *obj = op->gem.obj = in nouveau_uvmm_bind_job_submit()
1204 drm_gem_object_lookup(job->file_priv, in nouveau_uvmm_bind_job_submit()
1205 op->gem.handle); in nouveau_uvmm_bind_job_submit()
1207 return -ENOENT; in nouveau_uvmm_bind_job_submit()
1209 dma_resv_lock(obj->resv, NULL); in nouveau_uvmm_bind_job_submit()
1210 op->vm_bo = drm_gpuvm_bo_obtain(&uvmm->base, obj); in nouveau_uvmm_bind_job_submit()
1211 dma_resv_unlock(obj->resv); in nouveau_uvmm_bind_job_submit()
1212 if (IS_ERR(op->vm_bo)) in nouveau_uvmm_bind_job_submit()
1213 return PTR_ERR(op->vm_bo); in nouveau_uvmm_bind_job_submit()
1215 drm_gpuvm_bo_extobj_add(op->vm_bo); in nouveau_uvmm_bind_job_submit()
1239 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_submit()
1240 switch (op->op) { in nouveau_uvmm_bind_job_submit()
1243 op->va.addr, in nouveau_uvmm_bind_job_submit()
1244 op->va.range); in nouveau_uvmm_bind_job_submit()
1250 op->reg = nouveau_uvma_region_find(uvmm, op->va.addr, in nouveau_uvmm_bind_job_submit()
1251 op->va.range); in nouveau_uvmm_bind_job_submit()
1252 if (!op->reg || op->reg->dirty) { in nouveau_uvmm_bind_job_submit()
1253 ret = -ENOENT; in nouveau_uvmm_bind_job_submit()
1257 op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->base, in nouveau_uvmm_bind_job_submit()
1258 op->va.addr, in nouveau_uvmm_bind_job_submit()
1259 op->va.range); in nouveau_uvmm_bind_job_submit()
1260 if (IS_ERR(op->ops)) { in nouveau_uvmm_bind_job_submit()
1261 ret = PTR_ERR(op->ops); in nouveau_uvmm_bind_job_submit()
1265 ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1266 op->ops); in nouveau_uvmm_bind_job_submit()
1268 drm_gpuva_ops_free(&uvmm->base, op->ops); in nouveau_uvmm_bind_job_submit()
1269 op->ops = NULL; in nouveau_uvmm_bind_job_submit()
1270 op->reg = NULL; in nouveau_uvmm_bind_job_submit()
1274 nouveau_uvma_region_dirty(op->reg); in nouveau_uvmm_bind_job_submit()
1280 .map.va.addr = op->va.addr, in nouveau_uvmm_bind_job_submit()
1281 .map.va.range = op->va.range, in nouveau_uvmm_bind_job_submit()
1282 .map.gem.obj = op->gem.obj, in nouveau_uvmm_bind_job_submit()
1283 .map.gem.offset = op->gem.offset, in nouveau_uvmm_bind_job_submit()
1287 op->va.addr, in nouveau_uvmm_bind_job_submit()
1288 op->va.range); in nouveau_uvmm_bind_job_submit()
1290 u64 reg_addr = reg->va.addr; in nouveau_uvmm_bind_job_submit()
1291 u64 reg_end = reg_addr + reg->va.range; in nouveau_uvmm_bind_job_submit()
1292 u64 op_addr = op->va.addr; in nouveau_uvmm_bind_job_submit()
1293 u64 op_end = op_addr + op->va.range; in nouveau_uvmm_bind_job_submit()
1295 if (unlikely(reg->dirty)) { in nouveau_uvmm_bind_job_submit()
1296 ret = -EINVAL; in nouveau_uvmm_bind_job_submit()
1304 ret = -ENOSPC; in nouveau_uvmm_bind_job_submit()
1309 op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base, in nouveau_uvmm_bind_job_submit()
1311 if (IS_ERR(op->ops)) { in nouveau_uvmm_bind_job_submit()
1312 ret = PTR_ERR(op->ops); in nouveau_uvmm_bind_job_submit()
1316 ret = nouveau_uvmm_sm_map_prepare(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1317 reg, op->ops, in nouveau_uvmm_bind_job_submit()
1318 op->va.addr, in nouveau_uvmm_bind_job_submit()
1319 op->va.range, in nouveau_uvmm_bind_job_submit()
1320 op->flags & 0xff); in nouveau_uvmm_bind_job_submit()
1322 drm_gpuva_ops_free(&uvmm->base, op->ops); in nouveau_uvmm_bind_job_submit()
1323 op->ops = NULL; in nouveau_uvmm_bind_job_submit()
1330 op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->base, in nouveau_uvmm_bind_job_submit()
1331 op->va.addr, in nouveau_uvmm_bind_job_submit()
1332 op->va.range); in nouveau_uvmm_bind_job_submit()
1333 if (IS_ERR(op->ops)) { in nouveau_uvmm_bind_job_submit()
1334 ret = PTR_ERR(op->ops); in nouveau_uvmm_bind_job_submit()
1338 ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1339 op->ops); in nouveau_uvmm_bind_job_submit()
1341 drm_gpuva_ops_free(&uvmm->base, op->ops); in nouveau_uvmm_bind_job_submit()
1342 op->ops = NULL; in nouveau_uvmm_bind_job_submit()
1348 ret = -EINVAL; in nouveau_uvmm_bind_job_submit()
1353 drm_exec_init(exec, vme->flags, 0); in nouveau_uvmm_bind_job_submit()
1355 ret = bind_lock_validate(job, exec, vme->num_fences); in nouveau_uvmm_bind_job_submit()
1358 op = list_last_op(&bind_job->ops); in nouveau_uvmm_bind_job_submit()
1365 * As long as we validate() all GEMs and add fences to all GEMs' DMA in nouveau_uvmm_bind_job_submit()
1366 * reservations backing map and remap operations we can be sure there in nouveau_uvmm_bind_job_submit()
1368 * we're safe to check drm_gpuva_invalidated() within the fence in nouveau_uvmm_bind_job_submit()
1371 * GPUVAs about to be unmapped are safe as well, since they're unlinked in nouveau_uvmm_bind_job_submit()
1374 * GEMs from map and remap operations must be validated before linking in nouveau_uvmm_bind_job_submit()
1383 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_submit()
1384 switch (op->op) { in nouveau_uvmm_bind_job_submit()
1401 list_for_each_op_from_reverse(op, &bind_job->ops) { in nouveau_uvmm_bind_job_submit()
1402 switch (op->op) { in nouveau_uvmm_bind_job_submit()
1404 nouveau_uvma_region_destroy(uvmm, op->va.addr, in nouveau_uvmm_bind_job_submit()
1405 op->va.range); in nouveau_uvmm_bind_job_submit()
1408 __nouveau_uvma_region_insert(uvmm, op->reg); in nouveau_uvmm_bind_job_submit()
1409 nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1410 op->ops); in nouveau_uvmm_bind_job_submit()
1413 nouveau_uvmm_sm_map_prepare_unwind(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1414 op->ops, in nouveau_uvmm_bind_job_submit()
1415 op->va.addr, in nouveau_uvmm_bind_job_submit()
1416 op->va.range); in nouveau_uvmm_bind_job_submit()
1419 nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1420 op->ops); in nouveau_uvmm_bind_job_submit()
1424 drm_gpuva_ops_free(&uvmm->base, op->ops); in nouveau_uvmm_bind_job_submit()
1425 op->ops = NULL; in nouveau_uvmm_bind_job_submit()
1426 op->reg = NULL; in nouveau_uvmm_bind_job_submit()
1438 drm_gpuvm_exec_resv_add_fence(vme, job->done_fence, in nouveau_uvmm_bind_job_armed_submit()
1439 job->resv_usage, job->resv_usage); in nouveau_uvmm_bind_job_armed_submit()
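
The comment block at lines 1365-1374 and the calls around it imply a fixed submission order: lock and validate() every GEM backing a map/remap operation under drm_exec, only then link/unlink GPUVAs, and finally install the job's done_fence into all reservations (the drm_gpuvm_exec_resv_add_fence() call just above). A condensed, hedged sketch of that ordering; drm_exec_init(), bind_lock_validate() and drm_gpuvm_exec_resv_add_fence() appear in the matches, while the retry loop, the link step and the error label are assumptions:

    drm_exec_init(exec, vme->flags, 0);
    drm_exec_until_all_locked(exec) {
            /* lock and validate() all GEMs backing map/remap operations */
            ret = bind_lock_validate(job, exec, vme->num_fences);
            drm_exec_retry_on_contention(exec);
            if (ret)
                    goto unwind;    /* assumed error path */
    }

    /* with everything locked and resident, link new GPUVAs / unlink old ones */

    /* order the asynchronous map/unmap against later moves and evictions */
    drm_gpuvm_exec_resv_add_fence(vme, job->done_fence,
                                  job->resv_usage, job->resv_usage);
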
1447 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli); in nouveau_uvmm_bind_job_run()
1451 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_run()
1452 switch (op->op) { in nouveau_uvmm_bind_job_run()
1457 ret = nouveau_uvmm_sm_map(uvmm, &op->new, op->ops); in nouveau_uvmm_bind_job_run()
1464 ret = nouveau_uvmm_sm_unmap(uvmm, &op->new, op->ops); in nouveau_uvmm_bind_job_run()
1473 NV_PRINTK(err, job->cli, "bind job failed: %d\n", ret); in nouveau_uvmm_bind_job_run()
1481 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli); in nouveau_uvmm_bind_job_cleanup()
1484 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_cleanup()
1485 struct drm_gem_object *obj = op->gem.obj; in nouveau_uvmm_bind_job_cleanup()
1487 /* When nouveau_uvmm_bind_job_submit() fails op->ops and op->reg in nouveau_uvmm_bind_job_cleanup()
1490 switch (op->op) { in nouveau_uvmm_bind_job_cleanup()
1495 if (!IS_ERR_OR_NULL(op->ops)) in nouveau_uvmm_bind_job_cleanup()
1496 nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new, in nouveau_uvmm_bind_job_cleanup()
1497 op->ops); in nouveau_uvmm_bind_job_cleanup()
1499 if (op->reg) { in nouveau_uvmm_bind_job_cleanup()
1500 nouveau_uvma_region_sparse_unref(op->reg); in nouveau_uvmm_bind_job_cleanup()
1502 nouveau_uvma_region_remove(op->reg); in nouveau_uvmm_bind_job_cleanup()
1504 nouveau_uvma_region_complete(op->reg); in nouveau_uvmm_bind_job_cleanup()
1505 nouveau_uvma_region_put(op->reg); in nouveau_uvmm_bind_job_cleanup()
1510 if (!IS_ERR_OR_NULL(op->ops)) in nouveau_uvmm_bind_job_cleanup()
1511 nouveau_uvmm_sm_map_cleanup(uvmm, &op->new, in nouveau_uvmm_bind_job_cleanup()
1512 op->ops); in nouveau_uvmm_bind_job_cleanup()
1515 if (!IS_ERR_OR_NULL(op->ops)) in nouveau_uvmm_bind_job_cleanup()
1516 nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new, in nouveau_uvmm_bind_job_cleanup()
1517 op->ops); in nouveau_uvmm_bind_job_cleanup()
1521 if (!IS_ERR_OR_NULL(op->ops)) in nouveau_uvmm_bind_job_cleanup()
1522 drm_gpuva_ops_free(&uvmm->base, op->ops); in nouveau_uvmm_bind_job_cleanup()
1524 if (!IS_ERR_OR_NULL(op->vm_bo)) { in nouveau_uvmm_bind_job_cleanup()
1525 dma_resv_lock(obj->resv, NULL); in nouveau_uvmm_bind_job_cleanup()
1526 drm_gpuvm_bo_put(op->vm_bo); in nouveau_uvmm_bind_job_cleanup()
1527 dma_resv_unlock(obj->resv); in nouveau_uvmm_bind_job_cleanup()
1535 complete_all(&bind_job->complete); in nouveau_uvmm_bind_job_cleanup()
1555 return -ENOMEM; in bind_job_op_from_uop()
1557 switch (uop->op) { in bind_job_op_from_uop()
1559 op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ? in bind_job_op_from_uop()
1563 op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ? in bind_job_op_from_uop()
1567 op->op = uop->op; in bind_job_op_from_uop()
1571 op->flags = uop->flags; in bind_job_op_from_uop()
1572 op->va.addr = uop->addr; in bind_job_op_from_uop()
1573 op->va.range = uop->range; in bind_job_op_from_uop()
1574 op->gem.handle = uop->handle; in bind_job_op_from_uop()
1575 op->gem.offset = uop->bo_offset; in bind_job_op_from_uop()
1586 list_del(&op->entry); in bind_job_ops_free()
1604 INIT_LIST_HEAD(&job->ops); in nouveau_uvmm_bind_job_init()
1606 for (i = 0; i < __args->op.count; i++) { in nouveau_uvmm_bind_job_init()
1607 ret = bind_job_op_from_uop(&op, &__args->op.s[i]); in nouveau_uvmm_bind_job_init()
1611 list_add_tail(&op->entry, &job->ops); in nouveau_uvmm_bind_job_init()
1614 init_completion(&job->complete); in nouveau_uvmm_bind_job_init()
1616 args.file_priv = __args->file_priv; in nouveau_uvmm_bind_job_init()
1618 args.sched = __args->sched; in nouveau_uvmm_bind_job_init()
1621 args.in_sync.count = __args->in_sync.count; in nouveau_uvmm_bind_job_init()
1622 args.in_sync.s = __args->in_sync.s; in nouveau_uvmm_bind_job_init()
1624 args.out_sync.count = __args->out_sync.count; in nouveau_uvmm_bind_job_init()
1625 args.out_sync.s = __args->out_sync.s; in nouveau_uvmm_bind_job_init()
1627 args.sync = !(__args->flags & DRM_NOUVEAU_VM_BIND_RUN_ASYNC); in nouveau_uvmm_bind_job_init()
1631 ret = nouveau_job_init(&job->base, &args); in nouveau_uvmm_bind_job_init()
1639 bind_job_ops_free(&job->ops); in nouveau_uvmm_bind_job_init()
1656 ret = nouveau_job_submit(&job->base); in nouveau_uvmm_vm_bind()
1663 nouveau_job_fini(&job->base); in nouveau_uvmm_vm_bind()
1672 u32 inc = req->wait_count; in nouveau_uvmm_vm_bind_ucopy()
1673 u64 ins = req->wait_ptr; in nouveau_uvmm_vm_bind_ucopy()
1674 u32 outc = req->sig_count; in nouveau_uvmm_vm_bind_ucopy()
1675 u64 outs = req->sig_ptr; in nouveau_uvmm_vm_bind_ucopy()
1676 u32 opc = req->op_count; in nouveau_uvmm_vm_bind_ucopy()
1677 u64 ops = req->op_ptr; in nouveau_uvmm_vm_bind_ucopy()
1680 args->flags = req->flags; in nouveau_uvmm_vm_bind_ucopy()
1683 args->op.count = opc; in nouveau_uvmm_vm_bind_ucopy()
1684 args->op.s = u_memcpya(ops, opc, in nouveau_uvmm_vm_bind_ucopy()
1685 sizeof(*args->op.s)); in nouveau_uvmm_vm_bind_ucopy()
1686 if (IS_ERR(args->op.s)) in nouveau_uvmm_vm_bind_ucopy()
1687 return PTR_ERR(args->op.s); in nouveau_uvmm_vm_bind_ucopy()
1691 s = &args->in_sync.s; in nouveau_uvmm_vm_bind_ucopy()
1693 args->in_sync.count = inc; in nouveau_uvmm_vm_bind_ucopy()
1702 s = &args->out_sync.s; in nouveau_uvmm_vm_bind_ucopy()
1704 args->out_sync.count = outc; in nouveau_uvmm_vm_bind_ucopy()
1715 u_free(args->op.s); in nouveau_uvmm_vm_bind_ucopy()
1717 u_free(args->in_sync.s); in nouveau_uvmm_vm_bind_ucopy()
1724 u_free(args->op.s); in nouveau_uvmm_vm_bind_ufree()
1725 u_free(args->in_sync.s); in nouveau_uvmm_vm_bind_ufree()
1726 u_free(args->out_sync.s); in nouveau_uvmm_vm_bind_ufree()
1740 return -ENOSYS; in nouveau_uvmm_ioctl_vm_bind()
1746 args.sched = cli->sched; in nouveau_uvmm_ioctl_vm_bind()
1761 struct drm_gem_object *obj = &nvbo->bo.base; in nouveau_uvmm_bo_map_all()
1765 dma_resv_assert_held(obj->resv); in nouveau_uvmm_bo_map_all()
1780 struct drm_gem_object *obj = &nvbo->bo.base; in nouveau_uvmm_bo_unmap_all()
1784 dma_resv_assert_held(obj->resv); in nouveau_uvmm_bo_unmap_all()
1807 struct nouveau_bo *nvbo = nouveau_gem_object(vm_bo->obj); in nouveau_uvmm_bo_validate()
1809 nouveau_bo_placement_set(nvbo, nvbo->valid_domains, 0); in nouveau_uvmm_bo_validate()
1825 struct drm_device *drm = cli->drm->dev; in nouveau_uvmm_ioctl_vm_init()
1831 if (check_add_overflow(init->kernel_managed_addr, in nouveau_uvmm_ioctl_vm_init()
1832 init->kernel_managed_size, in nouveau_uvmm_ioctl_vm_init()
1834 return -EINVAL; in nouveau_uvmm_ioctl_vm_init()
1837 return -EINVAL; in nouveau_uvmm_ioctl_vm_init()
1839 mutex_lock(&cli->mutex); in nouveau_uvmm_ioctl_vm_init()
1841 if (unlikely(cli->uvmm.disabled)) { in nouveau_uvmm_ioctl_vm_init()
1842 ret = -ENOSYS; in nouveau_uvmm_ioctl_vm_init()
1848 ret = -ENOMEM; in nouveau_uvmm_ioctl_vm_init()
1855 ret = -ENOMEM; in nouveau_uvmm_ioctl_vm_init()
1859 mutex_init(&uvmm->mutex); in nouveau_uvmm_ioctl_vm_init()
1860 mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN); in nouveau_uvmm_ioctl_vm_init()
1861 mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex); in nouveau_uvmm_ioctl_vm_init()
1863 drm_gpuvm_init(&uvmm->base, cli->name, 0, drm, r_obj, in nouveau_uvmm_ioctl_vm_init()
1866 init->kernel_managed_addr, in nouveau_uvmm_ioctl_vm_init()
1867 init->kernel_managed_size, in nouveau_uvmm_ioctl_vm_init()
1872 ret = nvif_vmm_ctor(&cli->mmu, "uvmm", in nouveau_uvmm_ioctl_vm_init()
1873 cli->vmm.vmm.object.oclass, RAW, in nouveau_uvmm_ioctl_vm_init()
1874 init->kernel_managed_addr, in nouveau_uvmm_ioctl_vm_init()
1875 init->kernel_managed_size, in nouveau_uvmm_ioctl_vm_init()
1876 NULL, 0, &uvmm->vmm.vmm); in nouveau_uvmm_ioctl_vm_init()
1880 uvmm->vmm.cli = cli; in nouveau_uvmm_ioctl_vm_init()
1881 cli->uvmm.ptr = uvmm; in nouveau_uvmm_ioctl_vm_init()
1882 mutex_unlock(&cli->mutex); in nouveau_uvmm_ioctl_vm_init()
1887 drm_gpuvm_put(&uvmm->base); in nouveau_uvmm_ioctl_vm_init()
1889 mutex_unlock(&cli->mutex); in nouveau_uvmm_ioctl_vm_init()
1896 MA_STATE(mas, &uvmm->region_mt, 0, 0); in nouveau_uvmm_fini()
1898 struct nouveau_cli *cli = uvmm->vmm.cli; in nouveau_uvmm_fini()
1902 drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) { in nouveau_uvmm_fini()
1904 struct drm_gem_object *obj = va->gem.obj; in nouveau_uvmm_fini()
1906 if (unlikely(va == &uvmm->base.kernel_alloc_node)) in nouveau_uvmm_fini()
1911 dma_resv_lock(obj->resv, NULL); in nouveau_uvmm_fini()
1913 dma_resv_unlock(obj->resv); in nouveau_uvmm_fini()
1928 WARN(!mtree_empty(&uvmm->region_mt), in nouveau_uvmm_fini()
1930 __mt_destroy(&uvmm->region_mt); in nouveau_uvmm_fini()
1933 mutex_lock(&cli->mutex); in nouveau_uvmm_fini()
1934 nouveau_vmm_fini(&uvmm->vmm); in nouveau_uvmm_fini()
1935 drm_gpuvm_put(&uvmm->base); in nouveau_uvmm_fini()
1936 mutex_unlock(&cli->mutex); in nouveau_uvmm_fini()