// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Etnaviv Project
 */

#include <linux/moduleparam.h>

#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"
#include "state_hi.xml.h"

static int etnaviv_job_hang_limit;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);

static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence = NULL;

	if (likely(!sched_job->s_fence->finished.error))
		fence = etnaviv_gpu_submit(submit);
	else
		dev_dbg(submit->gpu->dev, "skipping bad job\n");

	return fence;
}

static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
							  *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct drm_gpu_scheduler *sched = sched_job->sched;
	struct etnaviv_gpu *gpu = submit->gpu;
	u32 dma_addr, primid = 0;
	int change;

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(submit->out_fence))
		goto out_no_timeout;

	/*
	 * If the GPU is still making forward progress on the front-end (which
	 * should never loop) we shift out the timeout to give it a chance to
	 * finish the job.
	 */
	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	change = dma_addr - gpu->hangcheck_dma_addr;
	if (submit->exec_state == ETNA_PIPE_3D) {
		/* guard against concurrent usage from perfmon_sample */
		mutex_lock(&gpu->lock);
		gpu_write(gpu, VIVS_MC_PROFILE_CONFIG0,
			  VIVS_MC_PROFILE_CONFIG0_FE_CURRENT_PRIM <<
			  VIVS_MC_PROFILE_CONFIG0_FE__SHIFT);
		primid = gpu_read(gpu, VIVS_MC_PROFILE_FE_READ);
		mutex_unlock(&gpu->lock);
	}
	if (gpu->state == ETNA_GPU_STATE_RUNNING &&
	    (gpu->completed_fence != gpu->hangcheck_fence ||
	     change < 0 || change > 16 ||
	     (submit->exec_state == ETNA_PIPE_3D &&
	      gpu->hangcheck_primid != primid))) {
		gpu->hangcheck_dma_addr = dma_addr;
		gpu->hangcheck_primid = primid;
		gpu->hangcheck_fence = gpu->completed_fence;
		goto out_no_timeout;
	}

	/* block scheduler */
	drm_sched_stop(&gpu->sched, sched_job);

	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* get the GPU back into the init state */
	etnaviv_core_dump(submit);
	etnaviv_gpu_recover_hang(submit);

	drm_sched_resubmit_jobs(&gpu->sched);

	drm_sched_start(&gpu->sched, 0);
	return DRM_GPU_SCHED_STAT_NOMINAL;

out_no_timeout:
	spin_lock(&sched->job_list_lock);
	list_add(&sched_job->list, &sched->pending_list);
	spin_unlock(&sched->job_list_lock);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

	drm_sched_job_cleanup(sched_job);

	etnaviv_submit_put(submit);
}

static const struct drm_sched_backend_ops etnaviv_sched_ops = {
	.run_job = etnaviv_sched_run_job,
	.timedout_job = etnaviv_sched_timedout_job,
	.free_job = etnaviv_sched_free_job,
};

int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	int ret;

	/*
	 * Hold the sched lock across the whole operation to avoid jobs being
	 * pushed out of order with regard to their sched fence seqnos as
	 * allocated in drm_sched_job_arm.
	 */
	mutex_lock(&gpu->sched_lock);

	drm_sched_job_arm(&submit->sched_job);

	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
	ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
			      submit->out_fence, xa_limit_32b,
			      &gpu->next_user_fence, GFP_KERNEL);
	if (ret < 0) {
		drm_sched_job_cleanup(&submit->sched_job);
		goto out_unlock;
	}

	/* the scheduler holds on to the job now */
	kref_get(&submit->refcount);

	drm_sched_entity_push_job(&submit->sched_job);

out_unlock:
	mutex_unlock(&gpu->sched_lock);

	return ret;
}

int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
	const struct drm_sched_init_args args = {
		.ops = &etnaviv_sched_ops,
		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
		.credit_limit = etnaviv_hw_jobs_limit,
		.hang_limit = etnaviv_job_hang_limit,
		.timeout = msecs_to_jiffies(500),
		.name = dev_name(gpu->dev),
		.dev = gpu->dev,
	};

	return drm_sched_init(&gpu->sched, &args);
}

void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
	drm_sched_fini(&gpu->sched);
}