Lines Matching +full:ctx +full:- +full:asid

1 // SPDX-License-Identifier: GPL-2.0-only
27 struct drm_device *dev = gpu->dev; in enable_pwrrail()
30 if (gpu->gpu_reg) { in enable_pwrrail()
31 ret = regulator_enable(gpu->gpu_reg); in enable_pwrrail()
33 DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret); in enable_pwrrail()
38 if (gpu->gpu_cx) { in enable_pwrrail()
39 ret = regulator_enable(gpu->gpu_cx); in enable_pwrrail()
41 DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret); in enable_pwrrail()
51 if (gpu->gpu_cx) in disable_pwrrail()
52 regulator_disable(gpu->gpu_cx); in disable_pwrrail()
53 if (gpu->gpu_reg) in disable_pwrrail()
54 regulator_disable(gpu->gpu_reg); in disable_pwrrail()
60 if (gpu->core_clk && gpu->fast_rate) in enable_clk()
61 dev_pm_opp_set_rate(&gpu->pdev->dev, gpu->fast_rate); in enable_clk()
64 if (gpu->rbbmtimer_clk) in enable_clk()
65 clk_set_rate(gpu->rbbmtimer_clk, 19200000); in enable_clk()
67 return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks); in enable_clk()
72 clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks); in disable_clk()
79 if (gpu->core_clk) in disable_clk()
80 dev_pm_opp_set_rate(&gpu->pdev->dev, 27000000); in disable_clk()
82 if (gpu->rbbmtimer_clk) in disable_clk()
83 clk_set_rate(gpu->rbbmtimer_clk, 0); in disable_clk()
90 return clk_prepare_enable(gpu->ebi1_clk); in enable_axi()
95 clk_disable_unprepare(gpu->ebi1_clk); in disable_axi()
103 DBG("%s", gpu->name); in msm_gpu_pm_resume()
120 gpu->needs_hw_init = true; in msm_gpu_pm_resume()
129 DBG("%s", gpu->name); in msm_gpu_pm_suspend()
146 gpu->suspend_count++; in msm_gpu_pm_suspend()
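
The matched lines above come from the runtime-PM path: msm_gpu_pm_resume() brings up the GPU power rails, then the core clocks, then the AXI/bus clock, and msm_gpu_pm_suspend() tears them down in reverse order. A minimal standalone sketch of that bring-up/tear-down ordering (plain C, not the driver code; the enable_*/disable_* stubs below stand in for the regulator and clk calls shown in the listing):

#include <stdio.h>

/* Hypothetical stand-ins for the regulator/clk helpers in the listing. */
static int enable_pwrrail(void)   { puts("power rails on");   return 0; }
static int enable_clk(void)       { puts("core clocks on");   return 0; }
static int enable_axi(void)       { puts("AXI/bus clock on"); return 0; }
static void disable_axi(void)     { puts("AXI/bus clock off"); }
static void disable_clk(void)     { puts("core clocks off"); }
static void disable_pwrrail(void) { puts("power rails off"); }

/* Resume: rails -> clocks -> bus; a failure unwinds what was enabled. */
static int gpu_resume(void)
{
    int ret;

    ret = enable_pwrrail();
    if (ret)
        return ret;
    ret = enable_clk();
    if (ret)
        goto fail_clk;
    ret = enable_axi();
    if (ret)
        goto fail_axi;
    return 0;

fail_axi:
    disable_clk();
fail_clk:
    disable_pwrrail();
    return ret;
}

/* Suspend: exact reverse of the resume order. */
static void gpu_suspend(void)
{
    disable_axi();
    disable_clk();
    disable_pwrrail();
}

int main(void)
{
    if (!gpu_resume())
        gpu_suspend();
    return 0;
}

The reverse ordering on suspend matters because the clocks depend on the rails being up; the listing's helpers also skip pieces (regulators, optional clocks) that a given SoC does not provide.
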
151 void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_context *ctx, in msm_gpu_show_fdinfo() argument
154 drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns); in msm_gpu_show_fdinfo()
155 drm_printf(p, "drm-cycles-gpu:\t%llu\n", ctx->cycles); in msm_gpu_show_fdinfo()
156 drm_printf(p, "drm-maxfreq-gpu:\t%u Hz\n", gpu->fast_rate); in msm_gpu_show_fdinfo()
163 WARN_ON(!mutex_is_locked(&gpu->lock)); in msm_gpu_hw_init()
165 if (!gpu->needs_hw_init) in msm_gpu_hw_init()
168 disable_irq(gpu->irq); in msm_gpu_hw_init()
169 ret = gpu->funcs->hw_init(gpu); in msm_gpu_hw_init()
171 gpu->needs_hw_init = false; in msm_gpu_hw_init()
172 enable_irq(gpu->irq); in msm_gpu_hw_init()
197 drm_printf(&p, "---\n"); in msm_gpu_devcoredump_read()
201 state->time.tv_sec, state->time.tv_nsec); in msm_gpu_devcoredump_read()
202 if (state->comm) in msm_gpu_devcoredump_read()
203 drm_printf(&p, "comm: %s\n", state->comm); in msm_gpu_devcoredump_read()
204 if (state->cmd) in msm_gpu_devcoredump_read()
205 drm_printf(&p, "cmdline: %s\n", state->cmd); in msm_gpu_devcoredump_read()
207 gpu->funcs->show(gpu, state, &p); in msm_gpu_devcoredump_read()
211 return count - iter.remain; in msm_gpu_devcoredump_read()
225 struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos]; in msm_gpu_crashstate_get_bo()
229 state_bo->size = size; in msm_gpu_crashstate_get_bo()
230 state_bo->flags = msm_obj->flags; in msm_gpu_crashstate_get_bo()
231 state_bo->iova = iova; in msm_gpu_crashstate_get_bo()
233 BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(msm_obj->name)); in msm_gpu_crashstate_get_bo()
235 memcpy(state_bo->name, msm_obj->name, sizeof(state_bo->name)); in msm_gpu_crashstate_get_bo()
240 state_bo->data = kvmalloc(size, GFP_KERNEL); in msm_gpu_crashstate_get_bo()
241 if (!state_bo->data) in msm_gpu_crashstate_get_bo()
246 kvfree(state_bo->data); in msm_gpu_crashstate_get_bo()
247 state_bo->data = NULL; in msm_gpu_crashstate_get_bo()
251 memcpy(state_bo->data, ptr + offset, size); in msm_gpu_crashstate_get_bo()
255 state->nr_bos++; in msm_gpu_crashstate_get_bo()
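
msm_gpu_crashstate_get_bo() records metadata (size, flags, iova, name) for every buffer tied to the crashing submit, and copies the buffer contents only when the submit flagged it for dumping. A standalone sketch of that "metadata always, contents on request" capture, with malloc/memcpy standing in for kvmalloc and the vmapped GEM pages (struct bo_snapshot and snapshot_bo are illustrative names, not driver API):

#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct bo_snapshot {
    uint64_t iova;
    size_t   size;
    char     name[32];
    void     *data;        /* NULL unless the BO was marked for dumping */
};

/* Capture one buffer: metadata always, contents only if 'dump' is set. */
int snapshot_bo(struct bo_snapshot *snap, uint64_t iova,
                const void *vaddr, size_t size, const char *name, int dump)
{
    memset(snap, 0, sizeof(*snap));
    snap->iova = iova;
    snap->size = size;
    strncpy(snap->name, name, sizeof(snap->name) - 1);

    if (!dump || !vaddr)
        return 0;

    snap->data = malloc(size);          /* kvmalloc() in the driver */
    if (!snap->data)
        return -1;
    memcpy(snap->data, vaddr, size);
    return 0;
}
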
262 if (msm_context_is_vmbind(submit->queue->ctx)) { in crashstate_get_bos()
271 drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(submit->vm)); in crashstate_get_bos()
274 drm_gpuvm_for_each_va (vma, submit->vm) { in crashstate_get_bos()
275 if (!vma->gem.obj) in crashstate_get_bos()
279 drm_exec_lock_obj(&exec, vma->gem.obj); in crashstate_get_bos()
285 drm_gpuvm_for_each_va (vma, submit->vm) in crashstate_get_bos()
288 state->bos = kcalloc(cnt, sizeof(struct msm_gpu_state_bo), GFP_KERNEL); in crashstate_get_bos()
290 drm_gpuvm_for_each_va (vma, submit->vm) { in crashstate_get_bos()
291 bool dump = rd_full || (vma->flags & MSM_VMA_DUMP); in crashstate_get_bos()
294 if (!vma->gem.obj) in crashstate_get_bos()
297 msm_gpu_crashstate_get_bo(state, vma->gem.obj, vma->va.addr, in crashstate_get_bos()
298 dump, vma->gem.offset, vma->va.range); in crashstate_get_bos()
303 state->bos = kcalloc(submit->nr_bos, in crashstate_get_bos()
306 for (int i = 0; state->bos && i < submit->nr_bos; i++) { in crashstate_get_bos()
307 struct drm_gem_object *obj = submit->bos[i].obj; in crashstate_get_bos()
308 bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP); in crashstate_get_bos()
311 msm_gpu_crashstate_get_bo(state, obj, submit->bos[i].iova, in crashstate_get_bos()
312 dump, 0, obj->size); in crashstate_get_bos()
320 uint32_t vm_log_len = (1 << vm->log_shift); in crashstate_get_vm_logs()
321 uint32_t vm_log_mask = vm_log_len - 1; in crashstate_get_vm_logs()
325 if (!vm->log || !vm->log[0].op) in crashstate_get_vm_logs()
328 mutex_lock(&vm->mmu_lock); in crashstate_get_vm_logs()
335 first = vm->log_idx; in crashstate_get_vm_logs()
337 if (!vm->log[first].op) { in crashstate_get_vm_logs()
340 * entries 0 to idx-1 are valid (ie. we haven't wrapped around in crashstate_get_vm_logs()
343 state->nr_vm_logs = MAX(0, first - 1); in crashstate_get_vm_logs()
346 state->nr_vm_logs = vm_log_len; in crashstate_get_vm_logs()
349 state->vm_logs = kmalloc_array( in crashstate_get_vm_logs()
350 state->nr_vm_logs, sizeof(vm->log[0]), GFP_KERNEL); in crashstate_get_vm_logs()
351 for (int i = 0; i < state->nr_vm_logs; i++) { in crashstate_get_vm_logs()
354 state->vm_logs[i] = vm->log[idx]; in crashstate_get_vm_logs()
357 mutex_unlock(&vm->mmu_lock); in crashstate_get_vm_logs()
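
crashstate_get_vm_logs() snapshots a power-of-two ring of VM_BIND log entries under vm->mmu_lock: vm->log_idx is the slot the producer writes next, so if that slot is still empty the log has not wrapped and only the leading entries are valid; otherwise every slot is valid and the copy starts at log_idx, the oldest entry. A standalone model of that copy-out (plain C; snapshot_vm_log and struct vm_log_entry are illustrative, and op == 0 is assumed to mean "never written"):

#include <stdlib.h>
#include <stdint.h>

struct vm_log_entry { uint32_t op; uint64_t iova, range; };

/*
 * Snapshot a ring of 2^log_shift entries into a linear array, oldest first.
 * 'next' is the slot the producer would write next (vm->log_idx above).
 * Returns the number of entries copied, or -1 on allocation failure.
 */
int snapshot_vm_log(const struct vm_log_entry *ring, unsigned log_shift,
                    unsigned next, struct vm_log_entry **out)
{
    unsigned len = 1u << log_shift, mask = len - 1;
    unsigned first = next, nr, i;

    if (!ring[first].op) {
        /* Not wrapped yet: mirrors the MAX(0, first - 1) in the listing. */
        nr = first ? first - 1 : 0;
        first = 0;
    } else {
        /* Wrapped: every slot is valid; the oldest entry is at 'next'. */
        nr = len;
    }

    *out = nr ? malloc(nr * sizeof(**out)) : NULL;
    if (nr && !*out)
        return -1;

    for (i = 0; i < nr; i++)
        (*out)[i] = ring[(first + i) & mask];

    return (int)nr;
}
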
367 if (!gpu->funcs->gpu_state_get) in msm_gpu_crashstate_capture()
371 if (gpu->crashstate) in msm_gpu_crashstate_capture()
374 state = gpu->funcs->gpu_state_get(gpu); in msm_gpu_crashstate_capture()
379 state->comm = kstrdup(comm, GFP_KERNEL); in msm_gpu_crashstate_capture()
380 state->cmd = kstrdup(cmd, GFP_KERNEL); in msm_gpu_crashstate_capture()
382 state->fault_info = *fault_info; in msm_gpu_crashstate_capture()
384 if (submit && state->fault_info.ttbr0) { in msm_gpu_crashstate_capture()
385 struct msm_gpu_fault_info *info = &state->fault_info; in msm_gpu_crashstate_capture()
386 struct msm_mmu *mmu = to_msm_vm(submit->vm)->mmu; in msm_gpu_crashstate_capture()
388 msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0, in msm_gpu_crashstate_capture()
389 &info->asid); in msm_gpu_crashstate_capture()
390 msm_iommu_pagetable_walk(mmu, info->iova, info->ptes); in msm_gpu_crashstate_capture()
394 crashstate_get_vm_logs(state, to_msm_vm(submit->vm)); in msm_gpu_crashstate_capture()
399 gpu->crashstate = state; in msm_gpu_crashstate_capture()
401 dev_coredumpm(&gpu->pdev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL, in msm_gpu_crashstate_capture()
422 spin_lock_irqsave(&ring->submit_lock, flags); in find_submit()
423 list_for_each_entry(submit, &ring->submits, node) { in find_submit()
424 if (submit->seqno == fence) { in find_submit()
425 spin_unlock_irqrestore(&ring->submit_lock, flags); in find_submit()
429 spin_unlock_irqrestore(&ring->submit_lock, flags); in find_submit()
438 struct msm_context *ctx = submit->queue->ctx; in get_comm_cmdline() local
441 WARN_ON(!mutex_is_locked(&submit->gpu->lock)); in get_comm_cmdline()
444 *comm = kstrdup(ctx->comm, GFP_KERNEL); in get_comm_cmdline()
445 *cmd = kstrdup(ctx->cmdline, GFP_KERNEL); in get_comm_cmdline()
447 task = get_pid_task(submit->pid, PIDTYPE_PID); in get_comm_cmdline()
452 *comm = kstrdup(task->comm, GFP_KERNEL); in get_comm_cmdline()
463 struct drm_device *dev = gpu->dev; in recover_worker()
464 struct msm_drm_private *priv = dev->dev_private; in recover_worker()
466 struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu); in recover_worker()
471 mutex_lock(&gpu->lock); in recover_worker()
473 DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name); in recover_worker()
475 submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1); in recover_worker()
485 submit->queue->faults++; in recover_worker()
487 task = get_pid_task(submit->pid, PIDTYPE_PID); in recover_worker()
489 gpu->global_faults++; in recover_worker()
491 struct msm_gem_vm *vm = to_msm_vm(submit->vm); in recover_worker()
493 vm->faults++; in recover_worker()
496 * If userspace has opted-in to VM_BIND (and therefore userspace in recover_worker()
501 if (!vm->managed) in recover_worker()
502 msm_gem_vm_unusable(submit->vm); in recover_worker()
508 DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n", in recover_worker()
509 gpu->name, comm, cmd); in recover_worker()
511 msm_rd_dump_submit(priv->hangrd, submit, in recover_worker()
514 DRM_DEV_ERROR(dev->dev, "%s: offending task: unknown\n", gpu->name); in recover_worker()
516 msm_rd_dump_submit(priv->hangrd, submit, NULL); in recover_worker()
520 pm_runtime_get_sync(&gpu->pdev->dev); in recover_worker()
531 for (i = 0; i < gpu->nr_rings; i++) { in recover_worker()
532 struct msm_ringbuffer *ring = gpu->rb[i]; in recover_worker()
534 uint32_t fence = ring->memptrs->fence; in recover_worker()
541 ring->memptrs->fence = ++fence; in recover_worker()
543 msm_update_fence(ring->fctx, fence); in recover_worker()
550 gpu->funcs->recover(gpu); in recover_worker()
556 for (i = 0; i < gpu->nr_rings; i++) { in recover_worker()
557 struct msm_ringbuffer *ring = gpu->rb[i]; in recover_worker()
560 spin_lock_irqsave(&ring->submit_lock, flags); in recover_worker()
561 list_for_each_entry(submit, &ring->submits, node) { in recover_worker()
566 if (to_msm_vm(submit->vm)->unusable) in recover_worker()
567 submit->nr_cmds = 0; in recover_worker()
568 gpu->funcs->submit(gpu, submit); in recover_worker()
570 spin_unlock_irqrestore(&ring->submit_lock, flags); in recover_worker()
574 pm_runtime_put(&gpu->pdev->dev); in recover_worker()
577 mutex_unlock(&gpu->lock); in recover_worker()
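
recover_worker() attributes the hang to the submit whose fence never signalled, marks the offending VM unusable for VM_BIND (userspace-managed) contexts, fast-forwards the completed fence past the crashed submit so waiters are released, resets the GPU, and replays the remaining submits, turning any submit on an unusable VM into a no-op by zeroing nr_cmds. A rough standalone model of that replay step (plain C over a flat array; the real driver walks per-ring submit lists under a spinlock and handles fence wraparound):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct submit_stub {
    uint32_t seqno;
    int      nr_cmds;
    bool     vm_unusable;   /* the submit's VM was marked unusable */
};

/*
 * After a hang: pretend everything up to and including the crashed submit
 * completed (so waiters are released), then replay the rest, neutering
 * submits on an unusable VM by zeroing nr_cmds.
 */
void recover(struct submit_stub *submits, int n,
             uint32_t *completed_fence, uint32_t crashed_seqno)
{
    if (*completed_fence < crashed_seqno)
        *completed_fence = crashed_seqno;     /* fast-forward the fence */

    for (int i = 0; i < n; i++) {
        struct submit_stub *s = &submits[i];

        if (s->seqno <= *completed_fence)
            continue;                         /* already retired */
        if (s->vm_unusable)
            s->nr_cmds = 0;                   /* skip its commands */
        printf("replay seqno %u (%d cmds)\n", s->seqno, s->nr_cmds);
    }
}
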
585 struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu); in msm_gpu_fault_crashstate_capture()
588 mutex_lock(&gpu->lock); in msm_gpu_fault_crashstate_capture()
590 submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1); in msm_gpu_fault_crashstate_capture()
591 if (submit && submit->fault_dumped) in msm_gpu_fault_crashstate_capture()
601 submit->fault_dumped = true; in msm_gpu_fault_crashstate_capture()
605 pm_runtime_get_sync(&gpu->pdev->dev); in msm_gpu_fault_crashstate_capture()
607 pm_runtime_put_sync(&gpu->pdev->dev); in msm_gpu_fault_crashstate_capture()
613 mutex_unlock(&gpu->lock); in msm_gpu_fault_crashstate_capture()
618 struct msm_drm_private *priv = gpu->dev->dev_private; in hangcheck_timer_reset()
619 mod_timer(&gpu->hangcheck_timer, in hangcheck_timer_reset()
620 round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period))); in hangcheck_timer_reset()
625 if (ring->hangcheck_progress_retries >= DRM_MSM_HANGCHECK_PROGRESS_RETRIES) in made_progress()
628 if (!gpu->funcs->progress) in made_progress()
631 if (!gpu->funcs->progress(gpu, ring)) in made_progress()
634 ring->hangcheck_progress_retries++; in made_progress()
641 struct drm_device *dev = gpu->dev; in hangcheck_handler()
642 struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); in hangcheck_handler()
643 uint32_t fence = ring->memptrs->fence; in hangcheck_handler()
645 if (fence != ring->hangcheck_fence) { in hangcheck_handler()
647 ring->hangcheck_fence = fence; in hangcheck_handler()
648 ring->hangcheck_progress_retries = 0; in hangcheck_handler()
649 } else if (fence_before(fence, ring->fctx->last_fence) && in hangcheck_handler()
652 ring->hangcheck_fence = fence; in hangcheck_handler()
653 ring->hangcheck_progress_retries = 0; in hangcheck_handler()
654 DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n", in hangcheck_handler()
655 gpu->name, ring->id); in hangcheck_handler()
656 DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n", in hangcheck_handler()
657 gpu->name, fence); in hangcheck_handler()
658 DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n", in hangcheck_handler()
659 gpu->name, ring->fctx->last_fence); in hangcheck_handler()
661 kthread_queue_work(gpu->worker, &gpu->recover_work); in hangcheck_handler()
665 if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence)) in hangcheck_handler()
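
The hangcheck timer compares the ring's completed fence with the value seen on the previous tick: if it moved, the GPU is alive; if it did not move but the optional progress callback reports the CP is still advancing, a retry counter is bumped instead of declaring a hang; only when no progress credit is left and work is still outstanding does it queue recover_work. A standalone model of that per-tick decision (plain C; hangcheck_tick and HANGCHECK_PROGRESS_RETRIES are illustrative, and fence wraparound handling is omitted):

#include <stdbool.h>
#include <stdint.h>

#define HANGCHECK_PROGRESS_RETRIES 3   /* stand-in for the driver's limit */

struct ring_state {
    uint32_t completed_fence;   /* ring->memptrs->fence */
    uint32_t last_submitted;    /* ring->fctx->last_fence */
    uint32_t hangcheck_fence;   /* value seen at the previous tick */
    int      progress_retries;
};

/* Returns true if recovery should be scheduled on this tick. */
bool hangcheck_tick(struct ring_state *r, bool (*progress)(void))
{
    if (r->completed_fence != r->hangcheck_fence) {
        /* Some progress was made: remember it and reset the retries. */
        r->hangcheck_fence = r->completed_fence;
        r->progress_retries = 0;
        return false;
    }

    if (r->completed_fence == r->last_submitted)
        return false;           /* nothing pending, nothing to do */

    if (progress && progress() &&
        r->progress_retries < HANGCHECK_PROGRESS_RETRIES) {
        /* Fence stuck, but the hardware still reports forward progress. */
        r->progress_retries++;
        return false;
    }

    /* No fence movement and no progress credit left: treat as a hang. */
    r->hangcheck_fence = r->completed_fence;
    r->progress_retries = 0;
    return true;
}
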
679 uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)]; in update_hw_cntrs()
680 int i, n = min(ncntrs, gpu->num_perfcntrs); in update_hw_cntrs()
683 for (i = 0; i < gpu->num_perfcntrs; i++) in update_hw_cntrs()
684 current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg); in update_hw_cntrs()
688 cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i]; in update_hw_cntrs()
691 for (i = 0; i < gpu->num_perfcntrs; i++) in update_hw_cntrs()
692 gpu->last_cntrs[i] = current_cntrs[i]; in update_hw_cntrs()
703 spin_lock_irqsave(&gpu->perf_lock, flags); in update_sw_cntrs()
704 if (!gpu->perfcntr_active) in update_sw_cntrs()
708 elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time)); in update_sw_cntrs()
710 gpu->totaltime += elapsed; in update_sw_cntrs()
711 if (gpu->last_sample.active) in update_sw_cntrs()
712 gpu->activetime += elapsed; in update_sw_cntrs()
714 gpu->last_sample.active = msm_gpu_active(gpu); in update_sw_cntrs()
715 gpu->last_sample.time = time; in update_sw_cntrs()
718 spin_unlock_irqrestore(&gpu->perf_lock, flags); in update_sw_cntrs()
725 pm_runtime_get_sync(&gpu->pdev->dev); in msm_gpu_perfcntr_start()
727 spin_lock_irqsave(&gpu->perf_lock, flags); in msm_gpu_perfcntr_start()
729 gpu->last_sample.active = msm_gpu_active(gpu); in msm_gpu_perfcntr_start()
730 gpu->last_sample.time = ktime_get(); in msm_gpu_perfcntr_start()
731 gpu->activetime = gpu->totaltime = 0; in msm_gpu_perfcntr_start()
732 gpu->perfcntr_active = true; in msm_gpu_perfcntr_start()
734 spin_unlock_irqrestore(&gpu->perf_lock, flags); in msm_gpu_perfcntr_start()
739 gpu->perfcntr_active = false; in msm_gpu_perfcntr_stop()
740 pm_runtime_put_sync(&gpu->pdev->dev); in msm_gpu_perfcntr_stop()
743 /* returns -errno or # of cntrs sampled */
750 spin_lock_irqsave(&gpu->perf_lock, flags); in msm_gpu_perfcntr_sample()
752 if (!gpu->perfcntr_active) { in msm_gpu_perfcntr_sample()
753 ret = -EINVAL; in msm_gpu_perfcntr_sample()
757 *activetime = gpu->activetime; in msm_gpu_perfcntr_sample()
758 *totaltime = gpu->totaltime; in msm_gpu_perfcntr_sample()
760 gpu->activetime = gpu->totaltime = 0; in msm_gpu_perfcntr_sample()
765 spin_unlock_irqrestore(&gpu->perf_lock, flags); in msm_gpu_perfcntr_sample()
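
The software counters accumulate, under perf_lock, the wall time since the last sample (totaltime) and the portion of it during which the GPU had work queued (activetime); msm_gpu_perfcntr_sample() hands both out and zeroes them. A profiler can therefore derive per-interval utilization, for example (plain C, made-up numbers):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Values as returned by one msm_gpu_perfcntr_sample() call (made up). */
    uint32_t activetime_us = 7300;     /* GPU-busy time in the interval */
    uint32_t totaltime_us  = 10000;    /* length of the sampling interval */

    double busy = totaltime_us ? 100.0 * activetime_us / totaltime_us : 0.0;
    printf("GPU busy: %.1f%% over %u us\n", busy, totaltime_us);
    return 0;
}
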
777 int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT; in retire_submit()
782 stats = &ring->memptrs->stats[index]; in retire_submit()
784 elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000; in retire_submit()
787 cycles = stats->cpcycles_end - stats->cpcycles_start; in retire_submit()
795 submit->queue->ctx->elapsed_ns += elapsed; in retire_submit()
796 submit->queue->ctx->cycles += cycles; in retire_submit()
799 stats->alwayson_start, stats->alwayson_end); in retire_submit()
803 pm_runtime_mark_last_busy(&gpu->pdev->dev); in retire_submit()
805 spin_lock_irqsave(&ring->submit_lock, flags); in retire_submit()
806 list_del(&submit->node); in retire_submit()
807 spin_unlock_irqrestore(&ring->submit_lock, flags); in retire_submit()
809 /* Update devfreq on transition from active->idle: */ in retire_submit()
810 mutex_lock(&gpu->active_lock); in retire_submit()
811 gpu->active_submits--; in retire_submit()
812 WARN_ON(gpu->active_submits < 0); in retire_submit()
813 if (!gpu->active_submits) { in retire_submit()
815 pm_runtime_put_autosuspend(&gpu->pdev->dev); in retire_submit()
818 mutex_unlock(&gpu->active_lock); in retire_submit()
828 for (i = 0; i < gpu->nr_rings; i++) { in retire_submits()
829 struct msm_ringbuffer *ring = gpu->rb[i]; in retire_submits()
835 spin_lock_irqsave(&ring->submit_lock, flags); in retire_submits()
836 submit = list_first_entry_or_null(&ring->submits, in retire_submits()
838 spin_unlock_irqrestore(&ring->submit_lock, flags); in retire_submits()
841 * If no submit, we are done. If submit->fence hasn't in retire_submits()
845 if (submit && dma_fence_is_signaled(submit->hw_fence)) { in retire_submits()
853 wake_up_all(&gpu->retire_event); in retire_submits()
868 for (i = 0; i < gpu->nr_rings; i++) in msm_gpu_retire()
869 msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence); in msm_gpu_retire()
871 kthread_queue_work(gpu->worker, &gpu->retire_work); in msm_gpu_retire()
878 struct msm_ringbuffer *ring = submit->ring; in msm_gpu_submit()
881 WARN_ON(!mutex_is_locked(&gpu->lock)); in msm_gpu_submit()
883 pm_runtime_get_sync(&gpu->pdev->dev); in msm_gpu_submit()
887 submit->seqno = submit->hw_fence->seqno; in msm_gpu_submit()
892 * ring->submits holds a ref to the submit, to deal with the case in msm_gpu_submit()
897 spin_lock_irqsave(&ring->submit_lock, flags); in msm_gpu_submit()
898 list_add_tail(&submit->node, &ring->submits); in msm_gpu_submit()
899 spin_unlock_irqrestore(&ring->submit_lock, flags); in msm_gpu_submit()
901 /* Update devfreq on transition from idle->active: */ in msm_gpu_submit()
902 mutex_lock(&gpu->active_lock); in msm_gpu_submit()
903 if (!gpu->active_submits) { in msm_gpu_submit()
904 pm_runtime_get(&gpu->pdev->dev); in msm_gpu_submit()
907 gpu->active_submits++; in msm_gpu_submit()
908 mutex_unlock(&gpu->active_lock); in msm_gpu_submit()
910 gpu->funcs->submit(gpu, submit); in msm_gpu_submit()
911 submit->ring->cur_ctx_seqno = submit->queue->ctx->seqno; in msm_gpu_submit()
913 pm_runtime_put(&gpu->pdev->dev); in msm_gpu_submit()
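
msm_gpu_submit() and retire_submit() bracket GPU activity with the active_submits count under active_lock: the 0 to 1 transition takes a runtime-PM reference and tells devfreq the GPU went active, and the last retire notifies devfreq of idleness and drops back to autosuspend. A standalone model of that idle/active bookkeeping (plain C; the pm_* and devfreq_* stubs stand in for the pm_runtime and msm_devfreq calls, and the lock is omitted):

#include <stdio.h>

static int active_submits;   /* protected by gpu->active_lock in the driver */

static void pm_get(void)             { puts("pm_runtime_get()"); }
static void pm_put_autosuspend(void) { puts("pm_runtime_put_autosuspend()"); }
static void devfreq_active(void)     { puts("devfreq: active"); }
static void devfreq_idle(void)       { puts("devfreq: idle"); }

/* Called when a submit is handed to the hardware. */
void submit_begin(void)
{
    if (!active_submits) {      /* idle -> active */
        pm_get();
        devfreq_active();
    }
    active_submits++;
}

/* Called when a submit retires. */
void submit_end(void)
{
    active_submits--;
    if (!active_submits) {      /* active -> idle */
        devfreq_idle();
        pm_put_autosuspend();
    }
}

int main(void)
{
    submit_begin();
    submit_begin();
    submit_end();
    submit_end();
    return 0;
}
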
924 return gpu->funcs->irq(gpu); in irq_handler()
929 int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks); in get_clocks()
932 gpu->nr_clocks = 0; in get_clocks()
936 gpu->nr_clocks = ret; in get_clocks()
938 gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks, in get_clocks()
939 gpu->nr_clocks, "core"); in get_clocks()
941 gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks, in get_clocks()
942 gpu->nr_clocks, "rbbmtimer"); in get_clocks()
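
get_clocks() fetches every clock assigned to the GPU node with devm_clk_bulk_get_all() and then picks the "core" and "rbbmtimer" clocks out of the bulk array by name. A standalone model of that by-name lookup over a bulk-style array (plain C; struct clk_bulk_stub and bulk_get_clock are illustrative, and the real msm helper is somewhat more lenient about legacy "_clk"-suffixed names):

#include <string.h>
#include <stddef.h>

struct clk_bulk_stub { const char *id; void *clk; };

/* Return the clock whose consumer id matches 'name', or NULL if absent. */
void *bulk_get_clock(const struct clk_bulk_stub *clks, int count,
                     const char *name)
{
    for (int i = 0; i < count; i++)
        if (clks[i].id && !strcmp(clks[i].id, name))
            return clks[i].clk;
    return NULL;
}
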
961 if (gpu->funcs->create_private_vm) { in msm_gpu_create_private_vm()
962 vm = gpu->funcs->create_private_vm(gpu, kernel_managed); in msm_gpu_create_private_vm()
964 to_msm_vm(vm)->pid = get_pid(task_pid(task)); in msm_gpu_create_private_vm()
968 vm = drm_gpuvm_get(gpu->vm); in msm_gpu_create_private_vm()
977 struct msm_drm_private *priv = drm->dev_private; in msm_gpu_init()
978 int i, ret, nr_rings = config->nr_rings; in msm_gpu_init()
982 if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs))) in msm_gpu_init()
983 gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs); in msm_gpu_init()
985 gpu->dev = drm; in msm_gpu_init()
986 gpu->funcs = funcs; in msm_gpu_init()
987 gpu->name = name; in msm_gpu_init()
989 gpu->worker = kthread_run_worker(0, "gpu-worker"); in msm_gpu_init()
990 if (IS_ERR(gpu->worker)) { in msm_gpu_init()
991 ret = PTR_ERR(gpu->worker); in msm_gpu_init()
992 gpu->worker = NULL; in msm_gpu_init()
996 sched_set_fifo_low(gpu->worker->task); in msm_gpu_init()
998 mutex_init(&gpu->active_lock); in msm_gpu_init()
999 mutex_init(&gpu->lock); in msm_gpu_init()
1000 init_waitqueue_head(&gpu->retire_event); in msm_gpu_init()
1001 kthread_init_work(&gpu->retire_work, retire_worker); in msm_gpu_init()
1002 kthread_init_work(&gpu->recover_work, recover_worker); in msm_gpu_init()
1004 priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD; in msm_gpu_init()
1011 if (funcs->progress) in msm_gpu_init()
1012 priv->hangcheck_period /= 2; in msm_gpu_init()
1014 timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0); in msm_gpu_init()
1016 spin_lock_init(&gpu->perf_lock); in msm_gpu_init()
1020 gpu->mmio = msm_ioremap(pdev, config->ioname); in msm_gpu_init()
1021 if (IS_ERR(gpu->mmio)) { in msm_gpu_init()
1022 ret = PTR_ERR(gpu->mmio); in msm_gpu_init()
1027 gpu->irq = platform_get_irq(pdev, 0); in msm_gpu_init()
1028 if (gpu->irq < 0) { in msm_gpu_init()
1029 ret = gpu->irq; in msm_gpu_init()
1033 ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, in msm_gpu_init()
1034 IRQF_TRIGGER_HIGH, "gpu-irq", gpu); in msm_gpu_init()
1036 DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret); in msm_gpu_init()
1044 gpu->ebi1_clk = msm_clk_get(pdev, "bus"); in msm_gpu_init()
1045 DBG("ebi1_clk: %p", gpu->ebi1_clk); in msm_gpu_init()
1046 if (IS_ERR(gpu->ebi1_clk)) in msm_gpu_init()
1047 gpu->ebi1_clk = NULL; in msm_gpu_init()
1050 gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd"); in msm_gpu_init()
1051 DBG("gpu_reg: %p", gpu->gpu_reg); in msm_gpu_init()
1052 if (IS_ERR(gpu->gpu_reg)) in msm_gpu_init()
1053 gpu->gpu_reg = NULL; in msm_gpu_init()
1055 gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx"); in msm_gpu_init()
1056 DBG("gpu_cx: %p", gpu->gpu_cx); in msm_gpu_init()
1057 if (IS_ERR(gpu->gpu_cx)) in msm_gpu_init()
1058 gpu->gpu_cx = NULL; in msm_gpu_init()
1060 platform_set_drvdata(pdev, &gpu->adreno_smmu); in msm_gpu_init()
1064 gpu->vm = gpu->funcs->create_vm(gpu, pdev); in msm_gpu_init()
1065 if (IS_ERR(gpu->vm)) { in msm_gpu_init()
1066 ret = PTR_ERR(gpu->vm); in msm_gpu_init()
1072 check_apriv(gpu, MSM_BO_WC), gpu->vm, &gpu->memptrs_bo, in msm_gpu_init()
1077 DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret); in msm_gpu_init()
1081 msm_gem_object_set_name(gpu->memptrs_bo, "memptrs"); in msm_gpu_init()
1083 if (nr_rings > ARRAY_SIZE(gpu->rb)) { in msm_gpu_init()
1084 DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n", in msm_gpu_init()
1085 ARRAY_SIZE(gpu->rb)); in msm_gpu_init()
1086 nr_rings = ARRAY_SIZE(gpu->rb); in msm_gpu_init()
1091 gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova); in msm_gpu_init()
1093 if (IS_ERR(gpu->rb[i])) { in msm_gpu_init()
1094 ret = PTR_ERR(gpu->rb[i]); in msm_gpu_init()
1095 DRM_DEV_ERROR(drm->dev, in msm_gpu_init()
1104 gpu->nr_rings = nr_rings; in msm_gpu_init()
1106 refcount_set(&gpu->sysprof_active, 1); in msm_gpu_init()
1111 for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) { in msm_gpu_init()
1112 msm_ringbuffer_destroy(gpu->rb[i]); in msm_gpu_init()
1113 gpu->rb[i] = NULL; in msm_gpu_init()
1116 msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm); in msm_gpu_init()
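
The tail of msm_gpu_init() shows the driver's single fail: path: on any setup error it tears down whatever has been created so far (ring buffers, the memptrs BO), tolerating pieces that were never allocated. A minimal standalone sketch of that single-label unwinding pattern (plain C with malloc/free; struct gpu_stub and the three steps are illustrative only):

#include <stdlib.h>
#include <string.h>

struct gpu_stub { void *worker, *rings, *memptrs; };

/* Single-label unwind: free whatever exists, NULL-tolerant like fail: above. */
int gpu_stub_init(struct gpu_stub *g)
{
    int ret = -1;

    memset(g, 0, sizeof(*g));

    g->worker = malloc(16);   /* stands in for the kthread worker */
    if (!g->worker)
        goto fail;
    g->rings = malloc(64);    /* stands in for the ring buffers */
    if (!g->rings)
        goto fail;
    g->memptrs = malloc(32);  /* stands in for the memptrs BO */
    if (!g->memptrs)
        goto fail;

    return 0;

fail:
    free(g->memptrs);
    free(g->rings);
    free(g->worker);
    memset(g, 0, sizeof(*g));
    return ret;
}
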
1126 DBG("%s", gpu->name); in msm_gpu_cleanup()
1128 for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) { in msm_gpu_cleanup()
1129 msm_ringbuffer_destroy(gpu->rb[i]); in msm_gpu_cleanup()
1130 gpu->rb[i] = NULL; in msm_gpu_cleanup()
1133 msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm); in msm_gpu_cleanup()
1135 if (!IS_ERR_OR_NULL(gpu->vm)) { in msm_gpu_cleanup()
1136 struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu; in msm_gpu_cleanup()
1137 mmu->funcs->detach(mmu); in msm_gpu_cleanup()
1138 drm_gpuvm_put(gpu->vm); in msm_gpu_cleanup()
1141 if (gpu->worker) { in msm_gpu_cleanup()
1142 kthread_destroy_worker(gpu->worker); in msm_gpu_cleanup()
1147 platform_set_drvdata(gpu->pdev, NULL); in msm_gpu_cleanup()