Lines matching "submit" (full-word matches) in drivers/gpu/drm/msm/msm_gpu.c
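These hits all land in the core scheduling and recovery code of the Linux MSM (Qualcomm Adreno) DRM driver. Read together, they trace a struct msm_gem_submit through its life cycle: hand-off to hardware (msm_gpu_submit), normal retirement (retire_submits/retire_submit), hang recovery (recover_worker), and crash-state capture (crashstate_get_bos, msm_gpu_crashstate_capture, msm_gpu_fault_crashstate_capture).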
258 static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submit *submit) in crashstate_get_bos() argument
262 if (msm_context_is_vmbind(submit->queue->ctx)) { in crashstate_get_bos()
271 drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(submit->vm)); in crashstate_get_bos()
274 drm_gpuvm_for_each_va (vma, submit->vm) { in crashstate_get_bos()
285 drm_gpuvm_for_each_va (vma, submit->vm) in crashstate_get_bos()
290 drm_gpuvm_for_each_va (vma, submit->vm) { in crashstate_get_bos()
303 state->bos = kcalloc(submit->nr_bos, in crashstate_get_bos()
306 for (int i = 0; state->bos && i < submit->nr_bos; i++) { in crashstate_get_bos()
307 struct drm_gem_object *obj = submit->bos[i].obj; in crashstate_get_bos()
308 bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP); in crashstate_get_bos()
311 msm_gpu_crashstate_get_bo(state, obj, submit->bos[i].iova, in crashstate_get_bos()
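The matches above cover both capture paths in crashstate_get_bos(): a VM_BIND path (lines 262-290) that walks every VMA in the submit's GPU VM under drm_exec locking, and a legacy path (lines 303-311) that walks the submit's BO table. A minimal sketch of the legacy loop, assuming a msm_gpu_crashstate_get_bo() helper that records one object at a given GPU address with an optional content dump (the tail of its argument list is not visible in the matches and is an assumption):

	state->bos = kcalloc(submit->nr_bos,
			sizeof(state->bos[0]), GFP_KERNEL);

	for (int i = 0; state->bos && i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = submit->bos[i].obj;
		/* Dump contents only for BOs userspace flagged with
		 * MSM_SUBMIT_BO_DUMP (or everything if rd_full is set): */
		bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP);

		msm_gpu_crashstate_get_bo(state, obj, submit->bos[i].iova,
					  dump);
	}

Note the allocation-failure handling: the loop condition rechecks state->bos, so a failed kcalloc() simply records no buffer objects rather than aborting the capture.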
361 struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info, in msm_gpu_crashstate_capture() argument
384 if (submit && state->fault_info.ttbr0) { in msm_gpu_crashstate_capture()
386 struct msm_mmu *mmu = to_msm_vm(submit->vm)->mmu; in msm_gpu_crashstate_capture()
393 if (submit) { in msm_gpu_crashstate_capture()
394 crashstate_get_vm_logs(state, to_msm_vm(submit->vm)); in msm_gpu_crashstate_capture()
395 crashstate_get_bos(state, submit); in msm_gpu_crashstate_capture()
406 struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info, in msm_gpu_crashstate_capture() argument
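msm_gpu_crashstate_capture() matches twice (lines 361 and 406) because it has two definitions: the full implementation is built when dev-coredump support is enabled, and the second is a no-op stub. The submit-dependent part of the full version looks roughly like the sketch below; the pagetable-parameter lookup is an assumption inferred from the ttbr0 check:

	if (submit && state->fault_info.ttbr0) {
		struct msm_gpu_fault_info *info = &state->fault_info;
		struct msm_mmu *mmu = to_msm_vm(submit->vm)->mmu;

		/* Record the faulting process's pagetable base and ASID: */
		msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0,
					   &info->asid);
	}

	if (submit) {
		crashstate_get_vm_logs(state, to_msm_vm(submit->vm));
		crashstate_get_bos(state, submit);
	}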
419 struct msm_gem_submit *submit; in find_submit() local
423 list_for_each_entry(submit, &ring->submits, node) { in find_submit()
424 if (submit->seqno == fence) { in find_submit()
426 return submit; in find_submit()
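find_submit() is small enough to reconstruct almost entirely from its matches: it scans a ring's in-flight list for the submit whose sequence number equals the given fence value. A sketch, assuming the list is protected by a ring->submit_lock spinlock as in the retire and submit paths below:

static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
	struct msm_gem_submit *submit;
	unsigned long flags;

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno == fence) {
			spin_unlock_irqrestore(&ring->submit_lock, flags);
			return submit;
		}
	}
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	return NULL;
}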
436 static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd) in get_comm_cmdline() argument
438 struct msm_context *ctx = submit->queue->ctx; in get_comm_cmdline()
441 WARN_ON(!mutex_is_locked(&submit->gpu->lock)); in get_comm_cmdline()
447 task = get_pid_task(submit->pid, PIDTYPE_PID); in get_comm_cmdline()
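get_comm_cmdline() resolves a printable task name and command line for the submitting process, used in hang logs and crash dumps; the WARN_ON documents that it must run under gpu->lock. A sketch of the likely shape, assuming comm/cmdline strings cached on the msm_context as a fallback source (ctx->comm and ctx->cmdline are hypothetical field names):

	struct task_struct *task;

	/* kstrdup() tolerates a NULL argument, returning NULL: */
	*comm = kstrdup(ctx->comm, GFP_KERNEL);
	*cmd = kstrdup(ctx->cmdline, GFP_KERNEL);

	task = get_pid_task(submit->pid, PIDTYPE_PID);
	if (!task)
		return;

	if (!*comm)
		*comm = kstrdup_quotable(task->comm, GFP_KERNEL);
	if (!*cmd)
		*cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);

	put_task_struct(task);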
465 struct msm_gem_submit *submit; in recover_worker() local
475 submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1); in recover_worker()
478 * If the submit retired while we were waiting for the worker to run, in recover_worker()
481 if (!submit) in recover_worker()
485 submit->queue->faults++; in recover_worker()
487 task = get_pid_task(submit->pid, PIDTYPE_PID); in recover_worker()
491 struct msm_gem_vm *vm = to_msm_vm(submit->vm); in recover_worker()
502 msm_gem_vm_unusable(submit->vm); in recover_worker()
505 get_comm_cmdline(submit, &comm, &cmd); in recover_worker()
511 msm_rd_dump_submit(priv->hangrd, submit, in recover_worker()
516 msm_rd_dump_submit(priv->hangrd, submit, NULL); in recover_worker()
521 msm_gpu_crashstate_capture(gpu, submit, NULL, comm, cmd); in recover_worker()
529 * bo's referenced by the offending submit are still around. in recover_worker()
537 * For the current (faulting?) ring/submit advance the fence by in recover_worker()
538 * one more to clear the faulting submit in recover_worker()
561 list_for_each_entry(submit, &ring->submits, node) { in recover_worker()
563 * If the submit uses an unusable vm make sure in recover_worker()
566 if (to_msm_vm(submit->vm)->unusable) in recover_worker()
567 submit->nr_cmds = 0; in recover_worker()
568 gpu->funcs->submit(gpu, submit); in recover_worker()
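recover_worker() runs after a detected hang: it looks up the offending submit (one past the last retired fence), bumps the per-queue fault count, marks the submit's VM unusable where required, logs the offending task, captures crash state, and finally replays the submits that survived the reset. The replay loop at the end, per the matches (outer ring iteration and locking assumed):

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];
		unsigned long flags;

		spin_lock_irqsave(&ring->submit_lock, flags);
		list_for_each_entry(submit, &ring->submits, node) {
			/*
			 * If the submit uses an unusable vm make sure
			 * we don't actually run it:
			 */
			if (to_msm_vm(submit->vm)->unusable)
				submit->nr_cmds = 0;
			gpu->funcs->submit(gpu, submit);
		}
		spin_unlock_irqrestore(&ring->submit_lock, flags);
	}

Zeroing nr_cmds instead of dropping the submit keeps fence ordering intact: the submit still passes through the ring and its fence signals, it just executes no commands.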
584 struct msm_gem_submit *submit; in msm_gpu_fault_crashstate_capture() local
590 submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1); in msm_gpu_fault_crashstate_capture()
591 if (submit && submit->fault_dumped) in msm_gpu_fault_crashstate_capture()
594 if (submit) { in msm_gpu_fault_crashstate_capture()
595 get_comm_cmdline(submit, &comm, &cmd); in msm_gpu_fault_crashstate_capture()
601 submit->fault_dumped = true; in msm_gpu_fault_crashstate_capture()
606 msm_gpu_crashstate_capture(gpu, submit, fault_info, comm, cmd); in msm_gpu_fault_crashstate_capture()
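msm_gpu_fault_crashstate_capture() mirrors recover_worker()'s lookup but rate-limits itself: iova faults can arrive thousands at a time, so each submit is dumped at most once, tracked by the fault_dumped flag. A sketch of the control flow implied by the matches (the real function's cleanup path is simplified to a plain return here):

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit && submit->fault_dumped)
		return;		/* this submit was already dumped once */

	if (submit) {
		get_comm_cmdline(submit, &comm, &cmd);

		/* Only log the first of potentially thousands of faults: */
		submit->fault_dumped = true;
	}

	msm_gpu_crashstate_capture(gpu, submit, fault_info, comm, cmd);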
775 struct msm_gem_submit *submit) in retire_submit() argument
777 int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT; in retire_submit()
795 submit->queue->ctx->elapsed_ns += elapsed; in retire_submit()
796 submit->queue->ctx->cycles += cycles; in retire_submit()
798 trace_msm_gpu_submit_retired(submit, elapsed, clock, in retire_submit()
801 msm_submit_retire(submit); in retire_submit()
806 list_del(&submit->node); in retire_submit()
820 msm_gem_submit_put(submit); in retire_submit()
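retire_submit() closes out one finished submit: it reads the per-submit GPU counters from the ring's shared stats slot (indexed by seqno modulo MSM_GPU_SUBMIT_STATS_COUNT, so slots recycle), credits elapsed time and cycles to the submitting context, emits a tracepoint, unlinks the submit from the ring, and drops the reference the ring held. A condensed sketch; the stats field names and the tick-to-nanosecond conversion are assumptions:

	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	volatile struct msm_gpu_submit_stats *stats =
			&ring->memptrs->stats[index];
	u64 elapsed, clock = 0, cycles;
	unsigned long flags;

	/* Convert 19.2 MHz always-on counter ticks to nanoseconds: */
	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000 / 192;
	cycles = stats->cyclectr_end - stats->cyclectr_start;
	/* (derivation of the average clock from cycles/elapsed omitted) */

	submit->queue->ctx->elapsed_ns += elapsed;
	submit->queue->ctx->cycles += cycles;

	trace_msm_gpu_submit_retired(submit, elapsed, clock,
			stats->alwayson_start, stats->alwayson_end);

	msm_submit_retire(submit);

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_del(&submit->node);
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	msm_gem_submit_put(submit);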
832 struct msm_gem_submit *submit = NULL; in retire_submits() local
836 submit = list_first_entry_or_null(&ring->submits, in retire_submits()
841 * If no submit, we are done. If submit->fence hasn't in retire_submits()
845 if (submit && dma_fence_is_signaled(submit->hw_fence)) { in retire_submits()
846 retire_submit(gpu, ring, submit); in retire_submits()
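retire_submits() drains each ring from the head. Because submits on a ring complete in order, it can stop at the first submit whose hardware fence has not signaled; everything behind it is necessarily unsignaled too. A sketch of the per-ring loop, with the usual submit_lock protection assumed:

	while (true) {
		struct msm_gem_submit *submit = NULL;
		unsigned long flags;

		spin_lock_irqsave(&ring->submit_lock, flags);
		submit = list_first_entry_or_null(&ring->submits,
				struct msm_gem_submit, node);
		spin_unlock_irqrestore(&ring->submit_lock, flags);

		/*
		 * If no submit, we are done.  If submit->fence hasn't
		 * been signalled, then later submits are not signalled
		 * either, so we are also done.
		 */
		if (submit && dma_fence_is_signaled(submit->hw_fence))
			retire_submit(gpu, ring, submit);
		else
			break;
	}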
876 void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in msm_gpu_submit() argument
878 struct msm_ringbuffer *ring = submit->ring; in msm_gpu_submit()
887 submit->seqno = submit->hw_fence->seqno; in msm_gpu_submit()
892 * ring->submits holds a ref to the submit, to deal with the case in msm_gpu_submit()
893 * that a submit completes before msm_ioctl_gem_submit() returns. in msm_gpu_submit()
895 msm_gem_submit_get(submit); in msm_gpu_submit()
898 list_add_tail(&submit->node, &ring->submits); in msm_gpu_submit()
910 gpu->funcs->submit(gpu, submit); in msm_gpu_submit()
911 submit->ring->cur_ctx_seqno = submit->queue->ctx->seqno; in msm_gpu_submit()
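msm_gpu_submit() is the hand-off to hardware, and the ordering visible in the matches matters: the submit is stamped with its hardware fence's seqno, then referenced and queued on ring->submits before gpu->funcs->submit() writes it into the ringbuffer, because (per the comment at lines 892-893) a submit can complete before msm_ioctl_gem_submit() even returns. A sketch with runtime-PM and locking details assumed:

void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct msm_ringbuffer *ring = submit->ring;
	unsigned long flags;

	WARN_ON(!mutex_is_locked(&gpu->lock));	/* assumed precondition */

	pm_runtime_get_sync(&gpu->pdev->dev);

	submit->seqno = submit->hw_fence->seqno;

	/*
	 * ring->submits holds a ref to the submit, to deal with the case
	 * that a submit completes before msm_ioctl_gem_submit() returns.
	 */
	msm_gem_submit_get(submit);

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_add_tail(&submit->node, &ring->submits);
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	gpu->funcs->submit(gpu, submit);
	submit->ring->cur_ctx_seqno = submit->queue->ctx->seqno;

	pm_runtime_put(&gpu->pdev->dev);
}

Recording cur_ctx_seqno after the hand-off lets the backend tell, on the next submit, whether the GPU is already running in that context's address space and skip a redundant pagetable switch.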