// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Etnaviv Project
 */

#include <linux/moduleparam.h>

#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"
#include "state_hi.xml.h"

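/*
 * Module knobs: job_hang_limit is the number of hangs a job may cause
 * before it is marked guilty and skipped, hw_job_limit caps how many
 * jobs the scheduler may have queued on the hardware at once.
 */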
static int etnaviv_job_hang_limit = 0;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);

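/*
 * Called by the scheduler once all dependencies of a job have signalled:
 * hand the submit to the hardware and return the out-fence, or skip the
 * job entirely if the scheduler fence already carries an error.
 */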
static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence = NULL;

	if (likely(!sched_job->s_fence->finished.error))
		fence = etnaviv_gpu_submit(submit);
	else
		dev_dbg(submit->gpu->dev, "skipping bad job\n");

	return fence;
}

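/*
 * Timeout handler: before declaring the GPU hung, check for a spurious
 * timeout (the fence already signalled) and for forward progress of the
 * front-end; only a GPU that is really stuck gets dumped and reset.
 */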
static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
							  *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;
	u32 dma_addr, primid = 0;
	int change;

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(submit->out_fence))
		goto out_no_timeout;

	/*
	 * If the GPU is still making forward progress on the front-end (which
	 * should never loop) we shift out the timeout to give it a chance to
	 * finish the job.
	 */
	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	change = dma_addr - gpu->hangcheck_dma_addr;
	if (submit->exec_state == ETNA_PIPE_3D) {
		/* guard against concurrent usage from perfmon_sample */
		mutex_lock(&gpu->lock);
		gpu_write(gpu, VIVS_MC_PROFILE_CONFIG0,
			  VIVS_MC_PROFILE_CONFIG0_FE_CURRENT_PRIM <<
			  VIVS_MC_PROFILE_CONFIG0_FE__SHIFT);
		primid = gpu_read(gpu, VIVS_MC_PROFILE_FE_READ);
		mutex_unlock(&gpu->lock);
	}
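	/*
	 * The GPU counts as stuck only if no new fence completed, the FE DMA
	 * address stayed within a small window of the last sample and, on the
	 * 3D pipe, the current primitive did not change; anything else is
	 * forward progress and simply rearms the timeout.
	 */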
	if (gpu->state == ETNA_GPU_STATE_RUNNING &&
	    (gpu->completed_fence != gpu->hangcheck_fence ||
	     change < 0 || change > 16 ||
	     (submit->exec_state == ETNA_PIPE_3D &&
	      gpu->hangcheck_primid != primid))) {
		gpu->hangcheck_dma_addr = dma_addr;
		gpu->hangcheck_primid = primid;
		gpu->hangcheck_fence = gpu->completed_fence;
		goto out_no_timeout;
	}

	/* block scheduler */
	drm_sched_stop(&gpu->sched, sched_job);

	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* get the GPU back into the init state */
	etnaviv_core_dump(submit);
	etnaviv_gpu_recover_hang(submit);

	drm_sched_resubmit_jobs(&gpu->sched);

	drm_sched_start(&gpu->sched, 0);
	return DRM_GPU_SCHED_STAT_NOMINAL;

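	/*
	 * The scheduler took the job off the pending list before calling the
	 * timeout handler, so put it back when no recovery is needed.
	 */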
out_no_timeout:
	list_add(&sched_job->list, &sched_job->sched->pending_list);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

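/*
 * Called once the job has retired: release the scheduler fences and drop
 * the reference on the submit taken in etnaviv_sched_push_job().
 */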
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

	drm_sched_job_cleanup(sched_job);

	etnaviv_submit_put(submit);
}

static const struct drm_sched_backend_ops etnaviv_sched_ops = {
	.run_job = etnaviv_sched_run_job,
	.timedout_job = etnaviv_sched_timedout_job,
	.free_job = etnaviv_sched_free_job,
};

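/*
 * Arm a fully prepared submit and queue it on the scheduler entity. On
 * success the scheduler holds its own reference on the submit, and the
 * out-fence id handed back to userspace has been allocated.
 */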
int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
{
	struct etnaviv_gpu *gpu = submit->gpu;
	int ret;

	/*
	 * Hold the sched lock across the whole operation to avoid jobs being
	 * pushed out of order with regard to their sched fence seqnos as
	 * allocated in drm_sched_job_arm.
	 */
	mutex_lock(&gpu->sched_lock);

	drm_sched_job_arm(&submit->sched_job);

	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
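	/*
	 * Hand out a cyclic id for the finished fence, so userspace can refer
	 * to this submit in wait ioctls by id rather than by fence fd.
	 */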
	ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
			      submit->out_fence, xa_limit_32b,
			      &gpu->next_user_fence, GFP_KERNEL);
	if (ret < 0) {
		drm_sched_job_cleanup(&submit->sched_job);
		goto out_unlock;
	}

	/*
	 * xa_alloc_cyclic() returns 1 when the id space wrapped around, which
	 * is not an error; don't leak that value to the caller.
	 */
	ret = 0;

	/* the scheduler holds on to the job now */
	kref_get(&submit->refcount);

	drm_sched_entity_push_job(&submit->sched_job);

out_unlock:
	mutex_unlock(&gpu->sched_lock);

	return ret;
}

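/*
 * One scheduler instance per GPU core; a job that makes no progress for
 * 500 msecs triggers the timeout handler above.
 */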
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, NULL,
			     DRM_SCHED_PRIORITY_COUNT,
			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
			     msecs_to_jiffies(500), NULL, NULL,
			     dev_name(gpu->dev), gpu->dev);
	if (ret)
		return ret;

	return 0;
}

void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
	drm_sched_fini(&gpu->sched);
}