Lines Matching refs:pipe
22 struct lima_sched_pipe *pipe; member
64 return f->pipe->base.name; in lima_fence_get_timeline_name()
88 static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe) in lima_fence_create() argument
96 fence->pipe = pipe; in lima_fence_create()
97 dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock, in lima_fence_create()
98 pipe->fence_context, ++pipe->fence_seqno); in lima_fence_create()
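The fence references above show each pipe carrying its own dma_fence context, spinlock and sequence counter, which lima_fence_create() stamps onto every new hardware fence. A minimal sketch of that pattern, reconstructed from the listed lines; the lima_fence layout and the plain kzalloc() (the driver likely uses a dedicated slab) are assumptions:

struct lima_fence {
        struct dma_fence base;
        struct lima_sched_pipe *pipe;   /* the struct member listed above */
};

static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe)
{
        struct lima_fence *fence;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return NULL;

        fence->pipe = pipe;
        /* one fence context and lock per pipe, monotonically increasing seqno */
        dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock,
                       pipe->fence_context, ++pipe->fence_seqno);
        return fence;
}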
158 int lima_sched_context_init(struct lima_sched_pipe *pipe, in lima_sched_context_init() argument
161 struct drm_gpu_scheduler *sched = &pipe->base; in lima_sched_context_init()
167 void lima_sched_context_fini(struct lima_sched_pipe *pipe, in lima_sched_context_fini() argument
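lima_sched_context_init() feeds the pipe's embedded drm_gpu_scheduler (&pipe->base) into a scheduler entity. A sketch against the current drm_sched_entity_init() interface; the context layout, the priority choice and the NULL guilty pointer are assumptions:

int lima_sched_context_init(struct lima_sched_pipe *pipe,
                            struct lima_sched_context *context)
{
        struct drm_gpu_scheduler *sched = &pipe->base;

        /* one entity per context, submitting into this pipe's scheduler */
        return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
                                     &sched, 1, NULL);
}

void lima_sched_context_fini(struct lima_sched_pipe *pipe,
                             struct lima_sched_context *context)
{
        drm_sched_entity_destroy(&context->base);
}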
207 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); in lima_sched_run_job() local
208 struct lima_device *ldev = pipe->ldev; in lima_sched_run_job()
216 fence = lima_fence_create(pipe); in lima_sched_run_job()
233 pipe->current_task = task; in lima_sched_run_job()
250 for (i = 0; i < pipe->num_l2_cache; i++) in lima_sched_run_job()
251 lima_l2_cache_flush(pipe->l2_cache[i]); in lima_sched_run_job()
253 lima_vm_put(pipe->current_vm); in lima_sched_run_job()
254 pipe->current_vm = lima_vm_get(task->vm); in lima_sched_run_job()
256 if (pipe->bcast_mmu) in lima_sched_run_job()
257 lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm); in lima_sched_run_job()
259 for (i = 0; i < pipe->num_mmu; i++) in lima_sched_run_job()
260 lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm); in lima_sched_run_job()
265 pipe->error = false; in lima_sched_run_job()
266 pipe->task_run(pipe, task); in lima_sched_run_job()
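Taken together, the run_job references describe the per-job hardware setup: create a fence on the pipe, flush every L2 cache, swap the pipe's current VM for the task's, point either the broadcast MMU or each per-core MMU at it, then kick the backend through pipe->task_run(). A condensed sketch of that flow; the to_lima_task() helper, the else between the two MMU paths and the fence wiring are assumptions, and tracing/devfreq bookkeeping is omitted:

static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
{
        struct lima_sched_task *task = to_lima_task(job);
        struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
        struct lima_fence *fence;
        int i;

        fence = lima_fence_create(pipe);
        if (!fence)
                return NULL;

        task->fence = &fence->base;
        /* extra reference so the IRQ handler cannot drop it under the caller */
        dma_fence_get(task->fence);

        pipe->current_task = task;

        /* caches must not hold entries that belong to the previous VM */
        for (i = 0; i < pipe->num_l2_cache; i++)
                lima_l2_cache_flush(pipe->l2_cache[i]);

        lima_vm_put(pipe->current_vm);
        pipe->current_vm = lima_vm_get(task->vm);

        if (pipe->bcast_mmu)
                lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm);
        else
                for (i = 0; i < pipe->num_mmu; i++)
                        lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm);

        pipe->error = false;
        pipe->task_run(pipe, task);

        return task->fence;
}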
274 struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched); in lima_sched_build_error_task_list() local
275 struct lima_ip *ip = pipe->processor[0]; in lima_sched_build_error_task_list()
301 size = sizeof(struct lima_dump_chunk) + pipe->frame_size; in lima_sched_build_error_task_list()
334 chunk->size = pipe->frame_size; in lima_sched_build_error_task_list()
335 memcpy(chunk + 1, task->frame, pipe->frame_size); in lima_sched_build_error_task_list()
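For the error dump, a frame chunk is sized as the chunk header plus pipe->frame_size bytes, with the raw frame descriptor copied immediately after the header. A hypothetical helper illustrating just that layout (lima_dump_copy_frame() is not part of the driver; the destination buffer must already provide sizeof(*chunk) + pipe->frame_size bytes):

/* hypothetical helper: append the raw frame descriptor after a chunk header */
static void lima_dump_copy_frame(struct lima_dump_chunk *chunk,
                                 struct lima_sched_pipe *pipe,
                                 struct lima_sched_task *task)
{
        chunk->size = pipe->frame_size;
        /* the frame data lives immediately after the fixed-size header */
        memcpy(chunk + 1, task->frame, pipe->frame_size);
}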
403 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); in lima_sched_timedout_job() local
405 struct lima_device *ldev = pipe->ldev; in lima_sched_timedout_job()
406 struct lima_ip *ip = pipe->processor[0]; in lima_sched_timedout_job()
425 for (i = 0; i < pipe->num_processor; i++) in lima_sched_timedout_job()
426 synchronize_irq(pipe->processor[i]->irq); in lima_sched_timedout_job()
427 if (pipe->bcast_processor) in lima_sched_timedout_job()
428 synchronize_irq(pipe->bcast_processor->irq); in lima_sched_timedout_job()
440 pipe->task_mask_irq(pipe); in lima_sched_timedout_job()
442 if (!pipe->error) in lima_sched_timedout_job()
445 drm_sched_stop(&pipe->base, &task->base); in lima_sched_timedout_job()
452 pipe->task_error(pipe); in lima_sched_timedout_job()
454 if (pipe->bcast_mmu) in lima_sched_timedout_job()
455 lima_mmu_page_fault_resume(pipe->bcast_mmu); in lima_sched_timedout_job()
457 for (i = 0; i < pipe->num_mmu; i++) in lima_sched_timedout_job()
458 lima_mmu_page_fault_resume(pipe->mmu[i]); in lima_sched_timedout_job()
461 lima_vm_put(pipe->current_vm); in lima_sched_timedout_job()
462 pipe->current_vm = NULL; in lima_sched_timedout_job()
463 pipe->current_task = NULL; in lima_sched_timedout_job()
467 drm_sched_resubmit_jobs(&pipe->base); in lima_sched_timedout_job()
468 drm_sched_start(&pipe->base, 0); in lima_sched_timedout_job()
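The timeout handler references give the recovery order: quiesce the task-done interrupts of every processor core, mask the pipe's task IRQ, stop the scheduler with the offending job, reset the backend through pipe->task_error(), clear pending MMU page faults, drop the current VM, then resubmit and restart the scheduler. A condensed sketch; the checks and logging at the top of the handler, the error-dump call and devfreq handling are omitted, and the return constant plus the else between the MMU paths are assumptions:

static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job)
{
        struct lima_sched_task *task = to_lima_task(job);
        struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
        int i;

        /* make sure no task-done IRQ is still in flight on any core */
        for (i = 0; i < pipe->num_processor; i++)
                synchronize_irq(pipe->processor[i]->irq);
        if (pipe->bcast_processor)
                synchronize_irq(pipe->bcast_processor->irq);

        pipe->task_mask_irq(pipe);

        drm_sched_stop(&pipe->base, &task->base);

        pipe->task_error(pipe);         /* backend-specific reset */

        if (pipe->bcast_mmu)
                lima_mmu_page_fault_resume(pipe->bcast_mmu);
        else
                for (i = 0; i < pipe->num_mmu; i++)
                        lima_mmu_page_fault_resume(pipe->mmu[i]);

        lima_vm_put(pipe->current_vm);
        pipe->current_vm = NULL;
        pipe->current_task = NULL;

        drm_sched_resubmit_jobs(&pipe->base);
        drm_sched_start(&pipe->base, 0);

        return DRM_GPU_SCHED_STAT_NOMINAL;
}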
476 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); in lima_sched_free_job() local
487 kmem_cache_free(pipe->task_slab, task); in lima_sched_free_job()
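lima_sched_free_job() hands the finished task back to the pipe's task slab. A minimal sketch; the fence, buffer-object and VM teardown that precedes the free is only indicated by a comment, and routing it through drm_sched_job_cleanup() directly is an assumption:

static void lima_sched_free_job(struct drm_sched_job *job)
{
        struct lima_sched_task *task = to_lima_task(job);
        struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);

        /* ... drop task fences, release the task's bos and vm ... */

        drm_sched_job_cleanup(job);
        kmem_cache_free(pipe->task_slab, task);
}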
498 struct lima_sched_pipe *pipe = in lima_sched_recover_work() local
502 for (i = 0; i < pipe->num_l2_cache; i++) in lima_sched_recover_work()
503 lima_l2_cache_flush(pipe->l2_cache[i]); in lima_sched_recover_work()
505 if (pipe->bcast_mmu) { in lima_sched_recover_work()
506 lima_mmu_flush_tlb(pipe->bcast_mmu); in lima_sched_recover_work()
508 for (i = 0; i < pipe->num_mmu; i++) in lima_sched_recover_work()
509 lima_mmu_flush_tlb(pipe->mmu[i]); in lima_sched_recover_work()
512 if (pipe->task_recover(pipe)) in lima_sched_recover_work()
513 drm_sched_fault(&pipe->base); in lima_sched_recover_work()
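The recover worker re-runs a task after a recoverable fault: flush the L2 caches, flush the MMU TLBs (broadcast MMU if present, otherwise each per-core MMU), then ask the backend to restart via pipe->task_recover(), escalating to drm_sched_fault() if that fails. A sketch assembled from the listed lines:

static void lima_sched_recover_work(struct work_struct *work)
{
        struct lima_sched_pipe *pipe =
                container_of(work, struct lima_sched_pipe, recover_work);
        int i;

        for (i = 0; i < pipe->num_l2_cache; i++)
                lima_l2_cache_flush(pipe->l2_cache[i]);

        if (pipe->bcast_mmu) {
                lima_mmu_flush_tlb(pipe->bcast_mmu);
        } else {
                for (i = 0; i < pipe->num_mmu; i++)
                        lima_mmu_flush_tlb(pipe->mmu[i]);
        }

        /* the backend could not restart the task: fall back to the fault path */
        if (pipe->task_recover(pipe))
                drm_sched_fault(&pipe->base);
}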
516 int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name) in lima_sched_pipe_init() argument
527 .dev = pipe->ldev->dev, in lima_sched_pipe_init()
530 pipe->fence_context = dma_fence_context_alloc(1); in lima_sched_pipe_init()
531 spin_lock_init(&pipe->fence_lock); in lima_sched_pipe_init()
533 INIT_WORK(&pipe->recover_work, lima_sched_recover_work); in lima_sched_pipe_init()
535 return drm_sched_init(&pipe->base, &args); in lima_sched_pipe_init()
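lima_sched_pipe_init() allocates a fence context, initializes the fence lock and the recover worker, and registers the pipe's embedded scheduler through the drm_sched_init()/drm_sched_init_args interface (the listing shows the args carrying pipe->ldev->dev). A sketch of that shape; every args field other than .dev, and the lima_sched_timeout_ms parameter, are assumptions:

int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
        const struct drm_sched_init_args args = {
                .ops = &lima_sched_ops,
                .num_rqs = DRM_SCHED_PRIORITY_COUNT,
                .credit_limit = 1,      /* one task on the hardware at a time */
                .timeout = msecs_to_jiffies(lima_sched_timeout_ms),
                .name = name,
                .dev = pipe->ldev->dev,
        };

        pipe->fence_context = dma_fence_context_alloc(1);
        spin_lock_init(&pipe->fence_lock);

        INIT_WORK(&pipe->recover_work, lima_sched_recover_work);

        return drm_sched_init(&pipe->base, &args);
}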
538 void lima_sched_pipe_fini(struct lima_sched_pipe *pipe) in lima_sched_pipe_fini() argument
540 drm_sched_fini(&pipe->base); in lima_sched_pipe_fini()
543 void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe) in lima_sched_pipe_task_done() argument
545 struct lima_sched_task *task = pipe->current_task; in lima_sched_pipe_task_done()
546 struct lima_device *ldev = pipe->ldev; in lima_sched_pipe_task_done()
548 if (pipe->error) { in lima_sched_pipe_task_done()
550 schedule_work(&pipe->recover_work); in lima_sched_pipe_task_done()
552 drm_sched_fault(&pipe->base); in lima_sched_pipe_task_done()
554 pipe->task_fini(pipe); in lima_sched_pipe_task_done()
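Finally, the task-done references show the IRQ-side completion path: on a backend error either schedule the recover worker or fall back to drm_sched_fault(), otherwise let the backend finish the task and signal its fence. A sketch of that branch structure; the task->recoverable test and the dma_fence_signal() call are assumptions based on the usual pattern:

void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
        struct lima_sched_task *task = pipe->current_task;

        if (pipe->error) {
                /* try a lightweight recovery first, otherwise let the
                 * scheduler's timeout/fault machinery take over */
                if (task && task->recoverable)
                        schedule_work(&pipe->recover_work);
                else
                        drm_sched_fault(&pipe->base);
        } else {
                pipe->task_fini(pipe);
                dma_fence_signal(task->fence);
        }
}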