Lines matching full:job — identifier search hits; each entry shows the source line number, the matched code, and the containing function where the tool reports one.
113 /** @fence: The fence that is signaled when the job completes */
115 /** @queue: The queue that the job runs on */
153 static inline struct msm_vm_bind_job *to_msm_vm_bind_job(struct drm_sched_job *job) in to_msm_vm_bind_job()
155 return container_of(job, struct msm_vm_bind_job, base); in to_msm_vm_bind_job()
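The helper above is the standard container_of() upcast: struct msm_vm_bind_job embeds struct drm_sched_job as its base member, so any scheduler-job pointer handed back by drm_sched callbacks can be converted to the wrapping bind job. A minimal, compilable userspace sketch of the same pattern (the stub types are mine, not the driver's):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct sched_job_stub { int id; };

struct bind_job_stub {
        struct sched_job_stub base;     /* embedded, not pointed-to */
        int nr_ops;
};

static struct bind_job_stub *to_bind_job(struct sched_job_stub *job)
{
        return container_of(job, struct bind_job_stub, base);
}

int main(void)
{
        struct bind_job_stub j = { .base = { .id = 7 }, .nr_ops = 3 };
        struct sched_job_stub *base = &j.base;  /* what callbacks receive */

        printf("nr_ops = %d\n", to_bind_job(base)->nr_ops);     /* prints 3 */
        return 0;
}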
460 struct msm_vm_bind_job *job;
470 list_add_tail(&op->node, &arg->job->vm_ops); in vm_op_enqueue()
479 return msm_gem_vma_new(arg->job->vm, op->gem.obj, op->gem.offset, in vma_from_op()
487 struct msm_vm_bind_job *job = arg->job; in msm_gem_vm_sm_step_map()
521 .queue_id = job->queue->id, in msm_gem_vm_sm_step_map()
534 struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job; in msm_gem_vm_sm_step_remap()
535 struct drm_gpuvm *vm = job->vm; in msm_gem_vm_sm_step_remap()
555 .queue_id = job->queue->id, in msm_gem_vm_sm_step_remap()
618 struct msm_vm_bind_job *job = arg->job; in msm_gem_vm_sm_step_unmap()
658 .queue_id = job->queue->id, in msm_gem_vm_sm_step_unmap()
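The three sm_step callbacks (map, remap, unmap) run under the GPUVM split/merge state machine, and as the hits above show, each one fetches the job from the op_arg and stamps the op with job->queue->id (presumably for tracepoints). They do not touch page tables directly: the work is packaged as a struct msm_vm_op and queued on job->vm_ops via vm_op_enqueue() for msm_vma_job_run() to replay. A hedged sketch of that deferral; msm_vm_op field names beyond node are my assumptions:

static int enqueue_op_sketch(struct op_arg *arg, u64 iova, u64 range)
{
        struct msm_vm_op *op = kmalloc(sizeof(*op), GFP_KERNEL);

        if (!op)
                return -ENOMEM;

        op->iova = iova;                        /* field names assumed */
        op->range = range;
        op->queue_id = arg->job->queue->id;     /* recorded for tracing */

        /* Defer the page-table work: msm_vma_job_run() replays
         * job->vm_ops in order on the scheduler thread. */
        list_add_tail(&op->node, &arg->job->vm_ops);
        return 0;
}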
682 struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job); in msm_vma_job_run()
683 struct msm_gem_vm *vm = to_msm_vm(job->vm); in msm_vma_job_run()
690 vm->mmu->prealloc = &job->prealloc; in msm_vma_job_run()
692 while (!list_empty(&job->vm_ops)) { in msm_vma_job_run()
694 list_first_entry(&job->vm_ops, struct msm_vm_op, node); in msm_vma_job_run()
723 msm_gem_vm_unusable(job->vm); in msm_vma_job_run()
725 job_foreach_bo (obj, job) { in msm_vma_job_run()
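msm_vma_job_run() is where the deferred ops are applied: it points vm->mmu->prealloc at the job's preallocated page-table memory, then drains job->vm_ops front to back; the msm_gem_vm_unusable() call visible above poisons the whole VM on failure rather than leaving it half-updated. A sketch of the drain loop, with apply_op_sketch() standing in for the per-op map/unmap dispatch:

static void run_drain_sketch(struct msm_vm_bind_job *job)
{
        struct msm_gem_vm *vm = to_msm_vm(job->vm);

        /* Point the MMU at the pages preallocated for this job. */
        vm->mmu->prealloc = &job->prealloc;

        while (!list_empty(&job->vm_ops)) {
                struct msm_vm_op *op =
                        list_first_entry(&job->vm_ops,
                                         struct msm_vm_op, node);

                list_del(&op->node);    /* FIFO: apply in submission order */
                apply_op_sketch(op);    /* hypothetical map/unmap dispatch */
                kfree(op);
        }
}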
738 struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job); in msm_vma_job_free()
739 struct msm_gem_vm *vm = to_msm_vm(job->vm); in msm_vma_job_free()
742 vm->mmu->funcs->prealloc_cleanup(vm->mmu, &job->prealloc); in msm_vma_job_free()
744 atomic_sub(job->prealloc.count, &vm->prealloc_throttle.in_flight); in msm_vma_job_free()
748 job_foreach_bo (obj, job) in msm_vma_job_free()
751 msm_submitqueue_put(job->queue); in msm_vma_job_free()
752 dma_fence_put(job->fence); in msm_vma_job_free()
755 while (!list_empty(&job->vm_ops)) { in msm_vma_job_free()
757 list_first_entry(&job->vm_ops, struct msm_vm_op, node); in msm_vma_job_free()
764 kfree(job); in msm_vma_job_free()
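The free path unwinds everything submission set up, in a definite order: the page-table preallocation is cleaned up and its budget credited back to the throttle counter, then the per-BO references, queue reference, and fence are dropped, then any ops still sitting on vm_ops (possible only if the job never ran) are freed, and finally the job itself. A compressed sketch of that order; drm_gem_object_put() as the per-BO drop and drain_ops_sketch() are my assumptions:

static void job_free_sketch(struct msm_vm_bind_job *job)
{
        struct msm_gem_vm *vm = to_msm_vm(job->vm);
        struct drm_gem_object *obj;

        /* Release preallocated page-table pages, then credit the
         * throttle budget back (pairs with vm_bind_prealloc_count()). */
        vm->mmu->funcs->prealloc_cleanup(vm->mmu, &job->prealloc);
        atomic_sub(job->prealloc.count, &vm->prealloc_throttle.in_flight);

        job_foreach_bo (obj, job)
                drm_gem_object_put(obj);        /* assumed put helper */

        msm_submitqueue_put(job->queue);
        dma_fence_put(job->fence);

        drain_ops_sketch(job);  /* vm_ops non-empty only if never run */
        kfree(job);
}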
933 struct msm_vm_bind_job *job; in vm_bind_job_create()
937 sz = struct_size(job, ops, nr_ops); in vm_bind_job_create()
942 job = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN); in vm_bind_job_create()
943 if (!job) in vm_bind_job_create()
946 ret = drm_sched_job_init(&job->base, queue->entity, 1, queue, in vm_bind_job_create()
949 kfree(job); in vm_bind_job_create()
953 job->vm = msm_context_vm(dev, queue->ctx); in vm_bind_job_create()
954 job->queue = queue; in vm_bind_job_create()
955 INIT_LIST_HEAD(&job->vm_ops); in vm_bind_job_create()
957 return job; in vm_bind_job_create()
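vm_bind_job_create() sizes a single allocation for the job header plus its trailing ops[nr_ops] array using struct_size(), which saturates on multiplication overflow; __GFP_NOWARN fits a user-controlled size whose failure is handled rather than logged (my reading, not stated in the source). A small, compilable userspace rendering of the flexible-array pattern; the names are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct bind_op { unsigned int op; unsigned long long iova, range; };

struct bind_job {
        unsigned int nr_ops;
        struct bind_op ops[];   /* flexible array member: sized at alloc */
};

static struct bind_job *job_create(unsigned int nr_ops)
{
        struct bind_job *job;

        /* One allocation holds header + ops[]; the kernel's
         * struct_size() additionally guards this multiply. */
        job = calloc(1, sizeof(*job) + (size_t)nr_ops * sizeof(job->ops[0]));
        if (!job)
                return NULL;
        job->nr_ops = 0;        /* filled incrementally, as by lookup_op() */
        return job;
}

int main(void)
{
        struct bind_job *job = job_create(4);

        if (!job)
                return 1;
        job->ops[job->nr_ops++] = (struct bind_op){ .op = 1, .iova = 0x1000 };
        printf("ops=%u first iova=%#llx\n", job->nr_ops, job->ops[0].iova);
        free(job);
        return 0;
}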
971 lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op) in lookup_op()
973 struct drm_device *dev = job->vm->drm; in lookup_op()
974 int i = job->nr_ops++; in lookup_op()
977 job->ops[i].op = op->op; in lookup_op()
978 job->ops[i].handle = op->handle; in lookup_op()
979 job->ops[i].obj_offset = op->obj_offset; in lookup_op()
980 job->ops[i].iova = op->iova; in lookup_op()
981 job->ops[i].range = op->range; in lookup_op()
982 job->ops[i].flags = op->flags; in lookup_op()
996 if (!drm_gpuvm_range_valid(job->vm, op->iova, op->range)) in lookup_op()
1027 vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args, in vm_bind_job_lookup_ops()
1030 struct drm_device *dev = job->vm->drm; in vm_bind_job_lookup_ops()
1037 ret = lookup_op(job, &args->op); in vm_bind_job_lookup_ops()
1047 job->ops[i].flags = 0; in vm_bind_job_lookup_ops()
1054 ret = lookup_op(job, &op); in vm_bind_job_lookup_ops()
1061 job->nr_ops = 0; in vm_bind_job_lookup_ops()
1068 struct msm_vm_bind_op *op = &job->ops[i]; in vm_bind_job_lookup_ops()
1105 struct msm_vm_bind_op *op = &job->ops[i]; in vm_bind_job_lookup_ops()
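lookup_op() appends into the preallocated ops[] array via job->nr_ops++ and validates each entry, including drm_gpuvm_range_valid() on the iova/range. The job->nr_ops = 0 on the error path above makes the parse all-or-nothing: one bad op discards the entire batch before anything is applied. A sketch of that shape, with check_op() as a hypothetical validator:

static int parse_ops_sketch(struct msm_vm_bind_job *job,
                            const struct drm_msm_vm_bind_op *ops,
                            unsigned int n)
{
        for (unsigned int i = 0; i < n; i++) {
                int ret = check_op(&ops[i]);    /* hypothetical validation */

                if (ret) {
                        job->nr_ops = 0;        /* roll back: discard all ops */
                        return ret;
                }
                job->ops[job->nr_ops].op = ops[i].op;   /* copy fields, as above */
                job->nr_ops++;
        }
        return 0;
}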
1119 prealloc_count(struct msm_vm_bind_job *job, in prealloc_count()
1123 struct msm_mmu *mmu = to_msm_vm(job->vm)->mmu; in prealloc_count()
1131 mmu->funcs->prealloc_count(mmu, &job->prealloc, start_iova, end_iova - start_iova); in prealloc_count()
1155 vm_bind_prealloc_count(struct msm_vm_bind_job *job) in vm_bind_prealloc_count()
1158 struct msm_gem_vm *vm = to_msm_vm(job->vm); in vm_bind_prealloc_count()
1161 for (int i = 0; i < job->nr_ops; i++) { in vm_bind_prealloc_count()
1162 struct msm_vm_bind_op *op = &job->ops[i]; in vm_bind_prealloc_count()
1182 prealloc_count(job, first, last); in vm_bind_prealloc_count()
1187 prealloc_count(job, first, last); in vm_bind_prealloc_count()
1199 atomic_add(job->prealloc.count, &vm->prealloc_throttle.in_flight); in vm_bind_prealloc_count()
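vm_bind_prealloc_count() walks the ops, merging adjacent IOVA spans (the two prealloc_count() call sites above cover the merged spans and the final one), lets the MMU backend compute a worst-case page-table page count, and charges it to vm->prealloc_throttle.in_flight. The matching atomic_sub() in msm_vma_job_free() credits it back; that this counter gates submitters, presumably via a waitqueue, is my inference from the field name. The accounting pair, sketched:

/* Charge at submit time (vm_bind_prealloc_count)... */
static void throttle_charge_sketch(struct msm_gem_vm *vm, int pages)
{
        atomic_add(pages, &vm->prealloc_throttle.in_flight);
}

/* ...credit back at teardown (msm_vma_job_free). */
static void throttle_uncharge_sketch(struct msm_gem_vm *vm, int pages)
{
        atomic_sub(pages, &vm->prealloc_throttle.in_flight);
}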
1208 vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec) in vm_bind_job_lock_objects()
1214 ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(job->vm)); in vm_bind_job_lock_objects()
1219 for (unsigned i = 0; i < job->nr_ops; i++) { in vm_bind_job_lock_objects()
1220 const struct msm_vm_bind_op *op = &job->ops[i]; in vm_bind_job_lock_objects()
1224 ret = drm_gpuvm_sm_unmap_exec_lock(job->vm, exec, in vm_bind_job_lock_objects()
1237 ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req); in vm_bind_job_lock_objects()
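Locking runs through the drm_exec helper: the VM's shared reservation object is locked first (drm_exec_lock_obj() on drm_gpuvm_resv_obj(), above), then each op's GEM objects via the drm_gpuvm_sm_*_exec_lock() helpers. A sketch of the standard drm_exec retry pattern this function sits inside; the init flags and object-count hint are illustrative, not necessarily what msm passes:

struct drm_exec exec;
int ret;

drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked (&exec) {
        /* vm_bind_job_lock_objects() takes the VM resv, then each
         * op's BOs; on ww-mutex contention the macro unwinds every
         * lock taken so far and restarts the body. */
        ret = vm_bind_job_lock_objects(job, &exec);
        drm_exec_retry_on_contention(&exec);
        if (ret)
                break;
}
/* ...pin, prepare, attach fences while locks are held... */
drm_exec_fini(&exec);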
1263 vm_bind_job_pin_objects(struct msm_vm_bind_job *job) in vm_bind_job_pin_objects()
1272 job_foreach_bo (obj, job) { in vm_bind_job_pin_objects()
1280 struct msm_drm_private *priv = job->vm->drm->dev_private; in vm_bind_job_pin_objects()
1290 job_foreach_bo (obj, job) in vm_bind_job_pin_objects()
1294 job->bos_pinned = true; in vm_bind_job_pin_objects()
1300 * Unpin GEM objects. Normally this is done after the bind job is run.
1303 vm_bind_job_unpin_objects(struct msm_vm_bind_job *job) in vm_bind_job_unpin_objects()
1307 if (!job->bos_pinned) in vm_bind_job_unpin_objects()
1310 job_foreach_bo (obj, job) in vm_bind_job_unpin_objects()
1313 job->bos_pinned = false; in vm_bind_job_unpin_objects()
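Pin and unpin are symmetric and guarded by job->bos_pinned, which makes unpin idempotent: the ioctl's error path can call it unconditionally without double-unpinning a job whose BOs the run path already released (per the comment above, unpin normally happens after the bind job runs). A sketch; msm_gem_unpin_locked() is a real msm helper, but its use at this spot is my assumption:

static void unpin_sketch(struct msm_vm_bind_job *job)
{
        struct drm_gem_object *obj;

        if (!job->bos_pinned)   /* already unpinned: nothing to do */
                return;

        job_foreach_bo (obj, job)
                msm_gem_unpin_locked(obj);

        job->bos_pinned = false;
}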
1321 vm_bind_job_prepare(struct msm_vm_bind_job *job) in vm_bind_job_prepare()
1323 struct msm_gem_vm *vm = to_msm_vm(job->vm); in vm_bind_job_prepare()
1327 ret = mmu->funcs->prealloc_allocate(mmu, &job->prealloc); in vm_bind_job_prepare()
1331 for (unsigned i = 0; i < job->nr_ops; i++) { in vm_bind_job_prepare()
1332 const struct msm_vm_bind_op *op = &job->ops[i]; in vm_bind_job_prepare()
1334 .job = job, in vm_bind_job_prepare()
1340 ret = drm_gpuvm_sm_unmap(job->vm, &arg, op->iova, in vm_bind_job_prepare()
1355 ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req); in vm_bind_job_prepare()
1373 msm_gem_vm_unusable(job->vm); in vm_bind_job_prepare()
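vm_bind_job_prepare() allocates the page-table memory counted earlier, then feeds each op to the GPUVM state machine (drm_gpuvm_sm_map()/drm_gpuvm_sm_unmap() above) with an op_arg carrying the job, so the sm_step callbacks shown earlier can turn split/merge decisions into queued vm_ops. A failure partway poisons the VM, mirroring the run path. Sketch of the dispatch; the op-code name and map_one_sketch() wrapper are assumptions:

static int prepare_sketch(struct msm_vm_bind_job *job)
{
        for (unsigned int i = 0; i < job->nr_ops; i++) {
                const struct msm_vm_bind_op *op = &job->ops[i];
                struct op_arg arg = { .job = job };     /* reaches sm_step_* */
                int ret;

                if (op->op == MSM_VM_BIND_OP_UNMAP)     /* op-code name assumed */
                        ret = drm_gpuvm_sm_unmap(job->vm, &arg, op->iova,
                                                 op->range);
                else
                        ret = map_one_sketch(job, &arg, op); /* wraps drm_gpuvm_sm_map() */

                if (ret) {
                        /* Page tables may be half-updated: poison the VM,
                         * as the run path also does on failure. */
                        msm_gem_vm_unusable(job->vm);
                        return ret;
                }
        }
        return 0;
}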
1387 vm_bind_job_attach_fences(struct msm_vm_bind_job *job) in vm_bind_job_attach_fences()
1389 for (unsigned i = 0; i < job->nr_ops; i++) { in vm_bind_job_attach_fences()
1390 struct drm_gem_object *obj = job->ops[i].obj; in vm_bind_job_attach_fences()
1395 dma_resv_add_fence(obj->resv, job->fence, in vm_bind_job_attach_fences()
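With the reservations still held by the drm_exec context, vm_bind_job_attach_fences() publishes job->fence on every BO an op references, so later submissions order against the bind. A sketch of the loop; the NULL-object skip and the dma_resv usage flag below are my assumptions, not necessarily the driver's choices:

for (unsigned int i = 0; i < job->nr_ops; i++) {
        struct drm_gem_object *obj = job->ops[i].obj;

        if (!obj)
                continue;       /* e.g. ops that reference no BO */

        /* Resv is already locked here via the drm_exec context. */
        dma_resv_add_fence(obj->resv, job->fence,
                           DMA_RESV_USAGE_BOOKKEEP);
}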
1406 struct msm_vm_bind_job *job = NULL; in msm_ioctl_vm_bind()
1455 job = vm_bind_job_create(dev, file, queue, args->nr_ops); in msm_ioctl_vm_bind()
1456 if (IS_ERR(job)) { in msm_ioctl_vm_bind()
1457 ret = PTR_ERR(job); in msm_ioctl_vm_bind()
1475 ret = drm_sched_job_add_dependency(&job->base, in_fence); in msm_ioctl_vm_bind()
1481 syncobjs_to_reset = msm_syncobj_parse_deps(dev, &job->base, in msm_ioctl_vm_bind()
1502 ret = vm_bind_job_lookup_ops(job, args, file, &nr_bos); in msm_ioctl_vm_bind()
1506 ret = vm_bind_prealloc_count(job); in msm_ioctl_vm_bind()
1514 ret = vm_bind_job_lock_objects(job, &exec); in msm_ioctl_vm_bind()
1518 ret = vm_bind_job_pin_objects(job); in msm_ioctl_vm_bind()
1522 ret = vm_bind_job_prepare(job); in msm_ioctl_vm_bind()
1526 drm_sched_job_arm(&job->base); in msm_ioctl_vm_bind()
1528 job->fence = dma_fence_get(&job->base.s_fence->finished); in msm_ioctl_vm_bind()
1531 sync_file = sync_file_create(job->fence); in msm_ioctl_vm_bind()
1539 vm_bind_job_attach_fences(job); in msm_ioctl_vm_bind()
1542 * The job can be free'd (and fence unref'd) at any point after in msm_ioctl_vm_bind()
1545 fence = dma_fence_get(job->fence); in msm_ioctl_vm_bind()
1547 drm_sched_entity_push_job(&job->base); in msm_ioctl_vm_bind()
1556 vm_bind_job_unpin_objects(job); in msm_ioctl_vm_bind()
1572 if (!IS_ERR_OR_NULL(job)) { in msm_ioctl_vm_bind()
1574 msm_vma_job_free(&job->base); in msm_ioctl_vm_bind()
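Read together, the hits in msm_ioctl_vm_bind() give the full submission sequence. A condensed sketch of the happy path, in the order the call sites appear above; return-code checks and the error unwinding (vm_bind_job_unpin_objects(), msm_vma_job_free()) are elided:

job = vm_bind_job_create(dev, file, queue, args->nr_ops);
drm_sched_job_add_dependency(&job->base, in_fence);     /* in-fence dep */
vm_bind_job_lookup_ops(job, args, file, &nr_bos);       /* copy + validate */
vm_bind_prealloc_count(job);                            /* PT-page budget */
vm_bind_job_lock_objects(job, &exec);                   /* drm_exec locking */
vm_bind_job_pin_objects(job);
vm_bind_job_prepare(job);                               /* build vm_ops list */
drm_sched_job_arm(&job->base);
job->fence = dma_fence_get(&job->base.s_fence->finished);
vm_bind_job_attach_fences(job);                         /* publish on BOs */
drm_sched_entity_push_job(&job->base);                  /* msm_vma_job_run() later */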