Lines matching refs:op (references to the identifier 'op' in the Nouveau uvmm VM_BIND code)
62 enum vm_bind_op op; member
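
The single 'member' hit at line 62 is the 'op' field of the per-job operation record that most of the hits below manipulate. For orientation, here is an illustrative sketch of that record, reconstructed only from the accesses visible in this listing (op->entry, op->flags, op->va.*, op->gem.*, op->reg, op->vm_bo, op->ops); the driver's real definition may differ, and the sparse enum names are assumptions.

#include <linux/list.h>
#include <linux/types.h>
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>

/*
 * Illustrative sketch only: field names are taken from the accesses shown
 * in this listing; the types of reg/vm_bo/ops are educated guesses based
 * on the DRM GPUVM helpers used further down.
 */
enum vm_bind_op {
        OP_MAP,                 /* map a GEM-backed VA range (line 999)   */
        OP_MAP_SPARSE,          /* create a sparse region (assumed name)  */
        OP_UNMAP,               /* unmap a VA range (line 1027)           */
        OP_UNMAP_SPARSE,        /* destroy a sparse region (assumed name) */
};

struct bind_job_op {
        struct list_head entry;         /* linked into the job's op list  */
        enum vm_bind_op op;             /* the member hit at line 62      */
        u32 flags;                      /* uAPI flags, see line 1317      */

        struct {
                u64 addr;
                u64 range;
        } va;

        struct {
                u32 handle;             /* GEM handle from userspace      */
                u64 offset;             /* offset into the BO             */
                struct drm_gem_object *obj;
        } gem;

        struct nouveau_uvma_region *reg; /* sparse region, if any         */
        struct drm_gpuvm_bo *vm_bo;      /* per-VM BO bookkeeping         */
        struct drm_gpuva_ops *ops;       /* split/merge op list           */
        /* plus prepared state accessed as op->new in the listing */
};
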
460 struct drm_gpuva_op *op = last; in nouveau_uvmm_sm_prepare_unwind() local
465 drm_gpuva_for_each_op_from_reverse(op, ops) { in nouveau_uvmm_sm_prepare_unwind()
466 switch (op->op) { in nouveau_uvmm_sm_prepare_unwind()
471 struct drm_gpuva_op_remap *r = &op->remap; in nouveau_uvmm_sm_prepare_unwind()
484 op_unmap_prepare_unwind(op->unmap.va); in nouveau_uvmm_sm_prepare_unwind()
497 drm_gpuva_for_each_op(op, ops) { in nouveau_uvmm_sm_prepare_unwind()
498 switch (op->op) { in nouveau_uvmm_sm_prepare_unwind()
508 struct drm_gpuva_op_remap *r = &op->remap; in nouveau_uvmm_sm_prepare_unwind()
526 struct drm_gpuva_op_unmap *u = &op->unmap; in nouveau_uvmm_sm_prepare_unwind()
550 if (op == last) in nouveau_uvmm_sm_prepare_unwind()
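
The hits in nouveau_uvmm_sm_prepare_unwind() show the iteration primitive the DRM GPUVM layer provides for rolling back a partially prepared split/merge sequence: the walk starts at the last op that was prepared and moves backwards, dispatching on op->op. A minimal sketch of that pattern, with the per-case undo work left as placeholders:

#include <drm/drm_gpuvm.h>

/*
 * Sketch of the reverse unwind walk: visit the split/merge ops backwards,
 * starting from the last op that was successfully prepared. The case
 * bodies are placeholders for the driver-specific undo steps.
 */
static void sketch_prepare_unwind(struct drm_gpuva_ops *ops,
                                  struct drm_gpuva_op *last)
{
        struct drm_gpuva_op *op = last;

        drm_gpuva_for_each_op_from_reverse(op, ops) {
                switch (op->op) {
                case DRM_GPUVA_OP_MAP:
                        /* drop the mapping prepared for op->map */
                        break;
                case DRM_GPUVA_OP_REMAP:
                        /* op->remap carries prev/next maps plus an unmap */
                        break;
                case DRM_GPUVA_OP_UNMAP:
                        /* restore the VA referenced by op->unmap.va */
                        break;
                default:
                        break;
                }
        }
}
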
583 struct drm_gpuva_op_map *op, in op_map_prepare() argument
596 drm_gpuva_map(&uvmm->base, &uvma->va, op); in op_map_prepare()
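
op_map_prepare() is where a new mapping comes into existence: a driver-allocated VA object is initialized from the drm_gpuva_op_map and inserted into the VM via drm_gpuva_map(). A hedged sketch, assuming the driver object simply embeds a struct drm_gpuva (accessed as uvma->va at line 596); 'sketch_uvma' is a stand-in name:

#include <linux/errno.h>
#include <linux/slab.h>
#include <drm/drm_gpuvm.h>

/* Assumed shape of the driver wrapper embedding the generic VA node. */
struct sketch_uvma {
        struct drm_gpuva va;
        /* driver-private state would follow */
};

static int sketch_map_prepare(struct drm_gpuvm *gpuvm,
                              struct drm_gpuva_op_map *op)
{
        struct sketch_uvma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);

        if (!uvma)
                return -ENOMEM;

        /*
         * Initializes uvma->va from the op (addr, range, gem.obj,
         * gem.offset) and inserts it into the GPU VA space.
         */
        drm_gpuva_map(gpuvm, &uvma->va, op);
        return 0;
}
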
620 struct drm_gpuva_op *op; in nouveau_uvmm_sm_prepare() local
625 drm_gpuva_for_each_op(op, ops) { in nouveau_uvmm_sm_prepare()
626 switch (op->op) { in nouveau_uvmm_sm_prepare()
630 ret = op_map_prepare(uvmm, &new->map, &op->map, args); in nouveau_uvmm_sm_prepare()
646 struct drm_gpuva_op_remap *r = &op->remap; in nouveau_uvmm_sm_prepare()
687 struct drm_gpuva_op_unmap *u = &op->unmap; in nouveau_uvmm_sm_prepare()
726 if (op != drm_gpuva_first_op(ops)) in nouveau_uvmm_sm_prepare()
728 drm_gpuva_prev_op(op), in nouveau_uvmm_sm_prepare()
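
The error path at lines 726-728 is the counterpart to the unwind helper above: if preparation fails partway through the op list, every op prepared before the failing one is rolled back, located with drm_gpuva_first_op() and drm_gpuva_prev_op(). A minimal sketch of that control flow, with the per-op work stubbed out:

#include <drm/drm_gpuvm.h>

/* Placeholder for preparing a single split/merge op. */
static int sketch_prepare_one(struct drm_gpuva_op *op)
{
        return 0;
}

/* Placeholder for unwinding ops in reverse, starting at 'last'. */
static void sketch_unwind_from(struct drm_gpuva_ops *ops,
                               struct drm_gpuva_op *last)
{
}

static int sketch_sm_prepare(struct drm_gpuva_ops *ops)
{
        struct drm_gpuva_op *op;
        int ret;

        drm_gpuva_for_each_op(op, ops) {
                ret = sketch_prepare_one(op);
                if (ret)
                        goto unwind;
        }
        return 0;

unwind:
        /* Roll back everything prepared before the op that failed. */
        if (op != drm_gpuva_first_op(ops))
                sketch_unwind_from(ops, drm_gpuva_prev_op(op));
        return ret;
}
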
759 op_gem_obj(struct drm_gpuva_op *op) in op_gem_obj() argument
761 switch (op->op) { in op_gem_obj()
763 return op->map.gem.obj; in op_gem_obj()
769 return op->remap.unmap->va->gem.obj; in op_gem_obj()
771 return op->unmap.va->gem.obj; in op_gem_obj()
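
op_gem_obj() resolves which GEM object a split/merge op refers to; where the pointer lives depends on the op type, as lines 763-771 show. The lookup is simple enough to reproduce almost verbatim:

#include <drm/drm_gpuvm.h>

/*
 * For a MAP the object is carried in the op itself; for REMAP and UNMAP
 * it is reached through the drm_gpuva being split or removed.
 */
static struct drm_gem_object *
sketch_op_gem_obj(struct drm_gpuva_op *op)
{
        switch (op->op) {
        case DRM_GPUVA_OP_MAP:
                return op->map.gem.obj;
        case DRM_GPUVA_OP_REMAP:
                /* prev/next/unmap all target the same backing object */
                return op->remap.unmap->va->gem.obj;
        case DRM_GPUVA_OP_UNMAP:
                return op->unmap.va->gem.obj;
        default:
                return NULL;
        }
}
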
831 struct drm_gpuva_op *op; in nouveau_uvmm_sm() local
833 drm_gpuva_for_each_op(op, ops) { in nouveau_uvmm_sm()
834 switch (op->op) { in nouveau_uvmm_sm()
839 op_remap(&op->remap, new); in nouveau_uvmm_sm()
842 op_unmap(&op->unmap); in nouveau_uvmm_sm()
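
nouveau_uvmm_sm() is the execution stage: the prepared op list is walked forward and each REMAP/UNMAP is applied. A REMAP bundles the original mapping to drop (remap.unmap) with up to two replacement maps (remap.prev/next) covering the parts of the old range that survive. A sketch of that dispatch with the page-table work elided:

#include <drm/drm_gpuvm.h>

static void sketch_sm(struct drm_gpuva_ops *ops)
{
        struct drm_gpuva_op *op;

        drm_gpuva_for_each_op(op, ops) {
                switch (op->op) {
                case DRM_GPUVA_OP_REMAP: {
                        struct drm_gpuva_op_remap *r = &op->remap;

                        if (r->prev) {
                                /* re-map the surviving head of the range */
                        }
                        if (r->next) {
                                /* re-map the surviving tail of the range */
                        }
                        /* then tear down the original via r->unmap */
                        break;
                }
                case DRM_GPUVA_OP_UNMAP:
                        /* unmap the range behind op->unmap.va */
                        break;
                default:
                        break;
                }
        }
}
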
873 struct drm_gpuva_op *op; in nouveau_uvmm_sm_cleanup() local
875 drm_gpuva_for_each_op(op, ops) { in nouveau_uvmm_sm_cleanup()
876 switch (op->op) { in nouveau_uvmm_sm_cleanup()
880 struct drm_gpuva_op_remap *r = &op->remap; in nouveau_uvmm_sm_cleanup()
904 struct drm_gpuva_op_unmap *u = &op->unmap; in nouveau_uvmm_sm_cleanup()
969 struct bind_job_op *op, *next; in nouveau_uvmm_bind_job_free() local
971 list_for_each_op_safe(op, next, &job->ops) { in nouveau_uvmm_bind_job_free()
972 list_del(&op->entry); in nouveau_uvmm_bind_job_free()
973 kfree(op); in nouveau_uvmm_bind_job_free()
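
nouveau_uvmm_bind_job_free() drains the job's op list with list_for_each_op_safe(), which is presumably a thin wrapper around list_for_each_entry_safe() keyed on the 'entry' member; the _safe variant is what makes unlinking and freeing the current element inside the loop legal. A sketch under that assumption:

#include <linux/list.h>
#include <linux/slab.h>

struct sketch_op {
        struct list_head entry;
        /* ... */
};

/* Safe teardown: 'next' is fetched before 'op' is unlinked and freed. */
static void sketch_ops_free(struct list_head *ops)
{
        struct sketch_op *op, *next;

        list_for_each_entry_safe(op, next, ops, entry) {
                list_del(&op->entry);
                kfree(op);
        }
}
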
994 struct bind_job_op *op) in bind_validate_op() argument
997 struct drm_gem_object *obj = op->gem.obj; in bind_validate_op()
999 if (op->op == OP_MAP) { in bind_validate_op()
1000 if (op->gem.offset & ~PAGE_MASK) in bind_validate_op()
1003 if (obj->size <= op->gem.offset) in bind_validate_op()
1006 if (op->va.range > (obj->size - op->gem.offset)) in bind_validate_op()
1010 return nouveau_uvmm_validate_range(uvmm, op->va.addr, op->va.range); in bind_validate_op()
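
bind_validate_op() applies the sanity checks for a MAP request before any state is touched: the BO offset must be page aligned, must lie inside the object, and must leave room for the requested range; note that the check at line 1006 subtracts first so it cannot overflow. A sketch of those checks:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <drm/drm_gem.h>

static int sketch_validate_map(struct drm_gem_object *obj,
                               u64 offset, u64 range)
{
        /* BO offset must be page aligned. */
        if (offset & ~PAGE_MASK)
                return -EINVAL;

        /* The offset must fall inside the object... */
        if (obj->size <= offset)
                return -EINVAL;

        /* ...and the mapped range must fit behind it; subtracting first
         * avoids overflowing offset + range. */
        if (range > obj->size - offset)
                return -EINVAL;

        return 0;
}
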
1018 struct bind_job_op *op; in bind_validate_map_sparse() local
1026 list_for_each_op(op, &bind_job->ops) { in bind_validate_map_sparse()
1027 if (op->op == OP_UNMAP) { in bind_validate_map_sparse()
1028 u64 op_addr = op->va.addr; in bind_validate_map_sparse()
1029 u64 op_end = op_addr + op->va.range; in bind_validate_map_sparse()
1092 struct bind_job_op *op; in bind_validate_region() local
1095 list_for_each_op(op, &bind_job->ops) { in bind_validate_region()
1096 u64 op_addr = op->va.addr; in bind_validate_region()
1097 u64 op_range = op->va.range; in bind_validate_region()
1100 switch (op->op) { in bind_validate_region()
1125 struct drm_gpuva_op *op; in bind_link_gpuvas() local
1127 drm_gpuva_for_each_op(op, ops) { in bind_link_gpuvas()
1128 switch (op->op) { in bind_link_gpuvas()
1133 struct drm_gpuva *va = op->remap.unmap->va; in bind_link_gpuvas()
1135 if (op->remap.prev) in bind_link_gpuvas()
1137 if (op->remap.next) in bind_link_gpuvas()
1143 drm_gpuva_unlink(op->unmap.va); in bind_link_gpuvas()
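
bind_link_gpuvas() wires the results of the split/merge ops to their GEM objects: newly created maps and the surviving prev/next pieces of a remap are linked to the object's drm_gpuvm_bo, while fully removed VAs are unlinked. A sketch of the dispatch; the drm_gpuva_link() calls are only commented because they need the driver-allocated VAs, which this listing does not show:

#include <drm/drm_gpuvm.h>

static void sketch_link_gpuvas(struct drm_gpuva_ops *ops)
{
        struct drm_gpuva_op *op;

        drm_gpuva_for_each_op(op, ops) {
                switch (op->op) {
                case DRM_GPUVA_OP_MAP:
                        /* drm_gpuva_link(new_va, vm_bo) for the new map */
                        break;
                case DRM_GPUVA_OP_REMAP: {
                        struct drm_gpuva *va = op->remap.unmap->va;

                        /*
                         * Link the surviving prev/next pieces against
                         * va->vm_bo, then drop the VA being replaced.
                         */
                        drm_gpuva_unlink(va);
                        break;
                }
                case DRM_GPUVA_OP_UNMAP:
                        drm_gpuva_unlink(op->unmap.va);
                        break;
                default:
                        break;
                }
        }
}
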
1156 struct bind_job_op *op; in bind_lock_validate() local
1159 list_for_each_op(op, &bind_job->ops) { in bind_lock_validate()
1162 if (!op->ops) in bind_lock_validate()
1165 drm_gpuva_for_each_op(va_op, op->ops) { in bind_lock_validate()
1178 if (va_op->op == DRM_GPUVA_OP_UNMAP) in bind_lock_validate()
1198 struct bind_job_op *op; in nouveau_uvmm_bind_job_submit() local
1201 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_submit()
1202 if (op->op == OP_MAP) { in nouveau_uvmm_bind_job_submit()
1203 struct drm_gem_object *obj = op->gem.obj = in nouveau_uvmm_bind_job_submit()
1205 op->gem.handle); in nouveau_uvmm_bind_job_submit()
1210 op->vm_bo = drm_gpuvm_bo_obtain(&uvmm->base, obj); in nouveau_uvmm_bind_job_submit()
1212 if (IS_ERR(op->vm_bo)) in nouveau_uvmm_bind_job_submit()
1213 return PTR_ERR(op->vm_bo); in nouveau_uvmm_bind_job_submit()
1215 drm_gpuvm_bo_extobj_add(op->vm_bo); in nouveau_uvmm_bind_job_submit()
1218 ret = bind_validate_op(job, op); in nouveau_uvmm_bind_job_submit()
1239 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_submit()
1240 switch (op->op) { in nouveau_uvmm_bind_job_submit()
1243 op->va.addr, in nouveau_uvmm_bind_job_submit()
1244 op->va.range); in nouveau_uvmm_bind_job_submit()
1250 op->reg = nouveau_uvma_region_find(uvmm, op->va.addr, in nouveau_uvmm_bind_job_submit()
1251 op->va.range); in nouveau_uvmm_bind_job_submit()
1252 if (!op->reg || op->reg->dirty) { in nouveau_uvmm_bind_job_submit()
1257 op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->base, in nouveau_uvmm_bind_job_submit()
1258 op->va.addr, in nouveau_uvmm_bind_job_submit()
1259 op->va.range); in nouveau_uvmm_bind_job_submit()
1260 if (IS_ERR(op->ops)) { in nouveau_uvmm_bind_job_submit()
1261 ret = PTR_ERR(op->ops); in nouveau_uvmm_bind_job_submit()
1265 ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1266 op->ops); in nouveau_uvmm_bind_job_submit()
1268 drm_gpuva_ops_free(&uvmm->base, op->ops); in nouveau_uvmm_bind_job_submit()
1269 op->ops = NULL; in nouveau_uvmm_bind_job_submit()
1270 op->reg = NULL; in nouveau_uvmm_bind_job_submit()
1274 nouveau_uvma_region_dirty(op->reg); in nouveau_uvmm_bind_job_submit()
1281 op->va.addr, in nouveau_uvmm_bind_job_submit()
1282 op->va.range); in nouveau_uvmm_bind_job_submit()
1286 u64 op_addr = op->va.addr; in nouveau_uvmm_bind_job_submit()
1287 u64 op_end = op_addr + op->va.range; in nouveau_uvmm_bind_job_submit()
1303 op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base, in nouveau_uvmm_bind_job_submit()
1304 op->va.addr, in nouveau_uvmm_bind_job_submit()
1305 op->va.range, in nouveau_uvmm_bind_job_submit()
1306 op->gem.obj, in nouveau_uvmm_bind_job_submit()
1307 op->gem.offset); in nouveau_uvmm_bind_job_submit()
1308 if (IS_ERR(op->ops)) { in nouveau_uvmm_bind_job_submit()
1309 ret = PTR_ERR(op->ops); in nouveau_uvmm_bind_job_submit()
1313 ret = nouveau_uvmm_sm_map_prepare(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1314 reg, op->ops, in nouveau_uvmm_bind_job_submit()
1315 op->va.addr, in nouveau_uvmm_bind_job_submit()
1316 op->va.range, in nouveau_uvmm_bind_job_submit()
1317 op->flags & 0xff); in nouveau_uvmm_bind_job_submit()
1319 drm_gpuva_ops_free(&uvmm->base, op->ops); in nouveau_uvmm_bind_job_submit()
1320 op->ops = NULL; in nouveau_uvmm_bind_job_submit()
1327 op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->base, in nouveau_uvmm_bind_job_submit()
1328 op->va.addr, in nouveau_uvmm_bind_job_submit()
1329 op->va.range); in nouveau_uvmm_bind_job_submit()
1330 if (IS_ERR(op->ops)) { in nouveau_uvmm_bind_job_submit()
1331 ret = PTR_ERR(op->ops); in nouveau_uvmm_bind_job_submit()
1335 ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1336 op->ops); in nouveau_uvmm_bind_job_submit()
1338 drm_gpuva_ops_free(&uvmm->base, op->ops); in nouveau_uvmm_bind_job_submit()
1339 op->ops = NULL; in nouveau_uvmm_bind_job_submit()
1355 op = list_last_op(&bind_job->ops); in nouveau_uvmm_bind_job_submit()
1380 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_submit()
1381 switch (op->op) { in nouveau_uvmm_bind_job_submit()
1385 bind_link_gpuvas(op); in nouveau_uvmm_bind_job_submit()
1396 op = list_prev_op(op); in nouveau_uvmm_bind_job_submit()
1398 list_for_each_op_from_reverse(op, &bind_job->ops) { in nouveau_uvmm_bind_job_submit()
1399 switch (op->op) { in nouveau_uvmm_bind_job_submit()
1401 nouveau_uvma_region_destroy(uvmm, op->va.addr, in nouveau_uvmm_bind_job_submit()
1402 op->va.range); in nouveau_uvmm_bind_job_submit()
1405 __nouveau_uvma_region_insert(uvmm, op->reg); in nouveau_uvmm_bind_job_submit()
1406 nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1407 op->ops); in nouveau_uvmm_bind_job_submit()
1410 nouveau_uvmm_sm_map_prepare_unwind(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1411 op->ops, in nouveau_uvmm_bind_job_submit()
1412 op->va.addr, in nouveau_uvmm_bind_job_submit()
1413 op->va.range); in nouveau_uvmm_bind_job_submit()
1416 nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1417 op->ops); in nouveau_uvmm_bind_job_submit()
1421 drm_gpuva_ops_free(&uvmm->base, op->ops); in nouveau_uvmm_bind_job_submit()
1422 op->ops = NULL; in nouveau_uvmm_bind_job_submit()
1423 op->reg = NULL; in nouveau_uvmm_bind_job_submit()
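
The long run of hits in nouveau_uvmm_bind_job_submit() is the CPU-side preparation of the whole job: look up the GEM handle and obtain a drm_gpuvm_bo for MAP ops, ask the GPUVM core for a split/merge op list with drm_gpuvm_sm_map_ops_create() or drm_gpuvm_sm_unmap_ops_create(), prepare it, and on failure free the op list and unwind everything already prepared in reverse (lines 1396-1423). A condensed, hedged sketch of the MAP branch of that flow; the driver-specific prepare step is stubbed, and on success the real driver keeps ops/vm_bo in the job for the run and cleanup stages:

#include <linux/dma-resv.h>
#include <linux/err.h>
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>

/* Placeholder for the driver-specific preparation of the op list. */
static int sketch_map_prepare_ops(struct drm_gpuva_ops *ops)
{
        return 0;
}

static int sketch_submit_map(struct drm_gpuvm *gpuvm,
                             struct drm_gem_object *obj,
                             u64 addr, u64 range, u64 bo_offset)
{
        struct drm_gpuvm_bo *vm_bo;
        struct drm_gpuva_ops *ops;
        int ret;

        /* Per-VM BO bookkeeping, done under the BO's reservation lock;
         * also registers the BO as an external object of the VM. */
        dma_resv_lock(obj->resv, NULL);
        vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
        dma_resv_unlock(obj->resv);
        if (IS_ERR(vm_bo))
                return PTR_ERR(vm_bo);
        drm_gpuvm_bo_extobj_add(vm_bo);

        /* Ask the GPUVM core which map/remap/unmap steps are required. */
        ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
                                          obj, bo_offset);
        if (IS_ERR(ops)) {
                ret = PTR_ERR(ops);
                goto err_put_vm_bo;
        }

        ret = sketch_map_prepare_ops(ops);
        if (ret)
                goto err_free_ops;

        return 0;

err_free_ops:
        drm_gpuva_ops_free(gpuvm, ops);
err_put_vm_bo:
        dma_resv_lock(obj->resv, NULL);
        drm_gpuvm_bo_put(vm_bo);
        dma_resv_unlock(obj->resv);
        return ret;
}
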
1445 struct bind_job_op *op; in nouveau_uvmm_bind_job_run() local
1448 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_run()
1449 switch (op->op) { in nouveau_uvmm_bind_job_run()
1454 ret = nouveau_uvmm_sm_map(uvmm, &op->new, op->ops); in nouveau_uvmm_bind_job_run()
1461 ret = nouveau_uvmm_sm_unmap(uvmm, &op->new, op->ops); in nouveau_uvmm_bind_job_run()
1479 struct bind_job_op *op; in nouveau_uvmm_bind_job_cleanup() local
1481 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_cleanup()
1482 struct drm_gem_object *obj = op->gem.obj; in nouveau_uvmm_bind_job_cleanup()
1487 switch (op->op) { in nouveau_uvmm_bind_job_cleanup()
1492 if (!IS_ERR_OR_NULL(op->ops)) in nouveau_uvmm_bind_job_cleanup()
1493 nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new, in nouveau_uvmm_bind_job_cleanup()
1494 op->ops); in nouveau_uvmm_bind_job_cleanup()
1496 if (op->reg) { in nouveau_uvmm_bind_job_cleanup()
1497 nouveau_uvma_region_sparse_unref(op->reg); in nouveau_uvmm_bind_job_cleanup()
1499 nouveau_uvma_region_remove(op->reg); in nouveau_uvmm_bind_job_cleanup()
1501 nouveau_uvma_region_complete(op->reg); in nouveau_uvmm_bind_job_cleanup()
1502 nouveau_uvma_region_put(op->reg); in nouveau_uvmm_bind_job_cleanup()
1507 if (!IS_ERR_OR_NULL(op->ops)) in nouveau_uvmm_bind_job_cleanup()
1508 nouveau_uvmm_sm_map_cleanup(uvmm, &op->new, in nouveau_uvmm_bind_job_cleanup()
1509 op->ops); in nouveau_uvmm_bind_job_cleanup()
1512 if (!IS_ERR_OR_NULL(op->ops)) in nouveau_uvmm_bind_job_cleanup()
1513 nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new, in nouveau_uvmm_bind_job_cleanup()
1514 op->ops); in nouveau_uvmm_bind_job_cleanup()
1518 if (!IS_ERR_OR_NULL(op->ops)) in nouveau_uvmm_bind_job_cleanup()
1519 drm_gpuva_ops_free(&uvmm->base, op->ops); in nouveau_uvmm_bind_job_cleanup()
1521 if (!IS_ERR_OR_NULL(op->vm_bo)) { in nouveau_uvmm_bind_job_cleanup()
1523 drm_gpuvm_bo_put(op->vm_bo); in nouveau_uvmm_bind_job_cleanup()
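
nouveau_uvmm_bind_job_cleanup() runs regardless of how far the job got, so every release is guarded: op->ops may be NULL or an ERR_PTR when op-list creation failed, hence the IS_ERR_OR_NULL() checks before the *_cleanup() helpers, drm_gpuva_ops_free() and drm_gpuvm_bo_put(); the final GEM put pairs with the handle lookup at submit time. A condensed sketch of that guard pattern:

#include <linux/dma-resv.h>
#include <linux/err.h>
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>

static void sketch_op_cleanup(struct drm_gpuvm *gpuvm,
                              struct drm_gpuva_ops *ops,
                              struct drm_gpuvm_bo *vm_bo,
                              struct drm_gem_object *obj)
{
        /* Only free what was actually created earlier in the job. */
        if (!IS_ERR_OR_NULL(ops))
                drm_gpuva_ops_free(gpuvm, ops);

        if (!IS_ERR_OR_NULL(vm_bo)) {
                dma_resv_lock(obj->resv, NULL);
                drm_gpuvm_bo_put(vm_bo);
                dma_resv_unlock(obj->resv);
        }

        if (obj)
                drm_gem_object_put(obj);
}
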
1548 struct bind_job_op *op; in bind_job_op_from_uop() local
1550 op = *pop = kzalloc(sizeof(*op), GFP_KERNEL); in bind_job_op_from_uop()
1551 if (!op) in bind_job_op_from_uop()
1554 switch (uop->op) { in bind_job_op_from_uop()
1556 op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ? in bind_job_op_from_uop()
1560 op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ? in bind_job_op_from_uop()
1564 op->op = uop->op; in bind_job_op_from_uop()
1568 op->flags = uop->flags; in bind_job_op_from_uop()
1569 op->va.addr = uop->addr; in bind_job_op_from_uop()
1570 op->va.range = uop->range; in bind_job_op_from_uop()
1571 op->gem.handle = uop->handle; in bind_job_op_from_uop()
1572 op->gem.offset = uop->bo_offset; in bind_job_op_from_uop()
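
bind_job_op_from_uop() translates one userspace bind op into the internal record: MAP/UNMAP requests carrying DRM_NOUVEAU_VM_BIND_SPARSE are routed to the sparse variants, and addr/range/handle/bo_offset are copied across. A hedged sketch of that translation, reusing the illustrative 'struct bind_job_op' and OP_* values shown after line 62; DRM_NOUVEAU_VM_BIND_OP_MAP/UNMAP are the uAPI op codes assumed here:

#include <linux/errno.h>
#include <linux/slab.h>
#include <uapi/drm/nouveau_drm.h>

static int sketch_op_from_uop(struct bind_job_op **pop,
                              struct drm_nouveau_vm_bind_op *uop)
{
        struct bind_job_op *op;

        op = *pop = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op)
                return -ENOMEM;

        switch (uop->op) {
        case DRM_NOUVEAU_VM_BIND_OP_MAP:
                /* the SPARSE flag selects the sparse-region variant */
                op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ?
                         OP_MAP_SPARSE : OP_MAP;
                break;
        case DRM_NOUVEAU_VM_BIND_OP_UNMAP:
                op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ?
                         OP_UNMAP_SPARSE : OP_UNMAP;
                break;
        default:
                op->op = uop->op;
                break;
        }

        op->flags = uop->flags;
        op->va.addr = uop->addr;
        op->va.range = uop->range;
        op->gem.handle = uop->handle;
        op->gem.offset = uop->bo_offset;

        return 0;
}
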
1580 struct bind_job_op *op, *next; in bind_job_ops_free() local
1582 list_for_each_op_safe(op, next, ops) { in bind_job_ops_free()
1583 list_del(&op->entry); in bind_job_ops_free()
1584 kfree(op); in bind_job_ops_free()
1594 struct bind_job_op *op; in nouveau_uvmm_bind_job_init() local
1603 for (i = 0; i < __args->op.count; i++) { in nouveau_uvmm_bind_job_init()
1604 ret = bind_job_op_from_uop(&op, &__args->op.s[i]); in nouveau_uvmm_bind_job_init()
1608 list_add_tail(&op->entry, &job->ops); in nouveau_uvmm_bind_job_init()
1680 args->op.count = opc; in nouveau_uvmm_vm_bind_ucopy()
1681 args->op.s = u_memcpya(ops, opc, in nouveau_uvmm_vm_bind_ucopy()
1682 sizeof(*args->op.s)); in nouveau_uvmm_vm_bind_ucopy()
1683 if (IS_ERR(args->op.s)) in nouveau_uvmm_vm_bind_ucopy()
1684 return PTR_ERR(args->op.s); in nouveau_uvmm_vm_bind_ucopy()
1712 u_free(args->op.s); in nouveau_uvmm_vm_bind_ucopy()
1721 u_free(args->op.s); in nouveau_uvmm_vm_bind_ufree()
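
nouveau_uvmm_vm_bind_ucopy() pulls the user-supplied array of bind ops into kernel memory with u_memcpya(), a Nouveau helper assumed here to duplicate nmemb * size bytes from a user pointer and return an ERR_PTR on failure, with u_free() as its counterpart. A hedged sketch of that copy-in/copy-out bracket; the args structure is illustrative:

#include <linux/err.h>
#include <linux/types.h>
#include <uapi/drm/nouveau_drm.h>
#include "nouveau_drv.h"        /* u_memcpya()/u_free(), assumed location */

struct sketch_bind_args {
        struct {
                struct drm_nouveau_vm_bind_op *s;
                unsigned int count;
        } op;
};

static int sketch_bind_ucopy(struct sketch_bind_args *args,
                             u64 user_ptr, unsigned int opc)
{
        /* Duplicate the user array; ERR_PTR is returned on failure. */
        args->op.count = opc;
        args->op.s = u_memcpya(user_ptr, opc, sizeof(*args->op.s));
        if (IS_ERR(args->op.s))
                return PTR_ERR(args->op.s);

        return 0;
}

static void sketch_bind_ufree(struct sketch_bind_args *args)
{
        u_free(args->op.s);
}
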