// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2018 Broadcom */

/**
 * DOC: Broadcom V3D scheduling
 *
 * The shared DRM GPU scheduler is used to coordinate submitting jobs
 * to the hardware.  Each DRM fd (roughly a client process) gets its
 * own scheduler entity, which will process jobs in order.  The GPU
 * scheduler will round-robin between clients to submit the next job.
 *
 * For simplicity, and in order to keep latency low for interactive
 * jobs when bulk background jobs are queued up, we submit a new job
 * to the HW only when it has completed the last one, instead of
 * filling up the CT[01]Q FIFOs with jobs.  Similarly, we use
 * drm_sched_job_add_dependency() to manage the dependency between bin and
 * render, instead of having the clients submit jobs using the HW's
 * semaphores to interlock between them.
 */

#include <linux/sched/clock.h>
#include <linux/kthread.h>

#include <drm/drm_syncobj.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

#define V3D_CSD_CFG012_WG_COUNT_SHIFT 16

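/* Helpers to downcast the scheduler's &struct drm_sched_job to the
 * per-queue v3d job types that embed it.
 */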
static struct v3d_job *
to_v3d_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_job, base);
}

static struct v3d_bin_job *
to_bin_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_bin_job, base.base);
}

static struct v3d_render_job *
to_render_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_render_job, base.base);
}

static struct v3d_tfu_job *
to_tfu_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_tfu_job, base.base);
}

static struct v3d_csd_job *
to_csd_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_csd_job, base.base);
}

static struct v3d_cpu_job *
to_cpu_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_cpu_job, base.base);
}

static void
v3d_sched_job_free(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);

	v3d_job_cleanup(job);
}

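/* Frees a CPU job: drops the syncobj references held by its timestamp
 * and performance query arrays, then does the common job cleanup.
 */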
static void
v3d_cpu_job_free(struct drm_sched_job *sched_job)
{
	struct v3d_cpu_job *job = to_cpu_job(sched_job);
	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
	struct v3d_performance_query_info *performance_query = &job->performance_query;

	if (timestamp_query->queries) {
		for (int i = 0; i < timestamp_query->count; i++)
			drm_syncobj_put(timestamp_query->queries[i].syncobj);
		kvfree(timestamp_query->queries);
	}

	if (performance_query->queries) {
		for (int i = 0; i < performance_query->count; i++)
			drm_syncobj_put(performance_query->queries[i].syncobj);
		kvfree(performance_query->queries);
	}

	v3d_job_cleanup(&job->base);
}

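/* Stop the active perfmon if the next job uses a different one (or
 * none), and start the job's perfmon if it isn't already active.
 */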
static void
v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
{
	if (job->perfmon != v3d->active_perfmon)
		v3d_perfmon_stop(v3d, v3d->active_perfmon, true);

	if (job->perfmon && v3d->active_perfmon != job->perfmon)
		v3d_perfmon_start(v3d, job->perfmon);
}

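/* Record the start time of a job in both the per-fd and the global
 * per-queue stats.  The seqcount lets stats readers get a consistent
 * snapshot without blocking the submission path.
 */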
static void
v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
{
	struct v3d_dev *v3d = job->v3d;
	struct v3d_file_priv *file = job->file->driver_priv;
	struct v3d_stats *global_stats = &v3d->queue[queue].stats;
	struct v3d_stats *local_stats = &file->stats[queue];
	u64 now = local_clock();

	write_seqcount_begin(&local_stats->lock);
	local_stats->start_ns = now;
	write_seqcount_end(&local_stats->lock);

	write_seqcount_begin(&global_stats->lock);
	global_stats->start_ns = now;
	write_seqcount_end(&global_stats->lock);
}

static void
v3d_stats_update(struct v3d_stats *stats, u64 now)
{
	write_seqcount_begin(&stats->lock);
	stats->enabled_ns += now - stats->start_ns;
	stats->jobs_completed++;
	stats->start_ns = 0;
	write_seqcount_end(&stats->lock);
}

void
v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
{
	struct v3d_dev *v3d = job->v3d;
	struct v3d_file_priv *file = job->file->driver_priv;
	struct v3d_stats *global_stats = &v3d->queue[queue].stats;
	struct v3d_stats *local_stats = &file->stats[queue];
	u64 now = local_clock();

	v3d_stats_update(local_stats, now);
	v3d_stats_update(global_stats, now);
}

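/* run_job for the binner: record the job under v3d->job_lock,
 * invalidate the caches, create the fence that will be signaled when
 * the job completes, and kick the control list; writing the CT0 end
 * address register is what starts the job.
 */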
static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_bin_job *job = to_bin_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;
	unsigned long irqflags;

	if (unlikely(job->base.base.s_fence->finished.error))
		return NULL;

	/* Lock required around bin_job update vs
	 * v3d_overflow_mem_work().
	 */
	spin_lock_irqsave(&v3d->job_lock, irqflags);
	v3d->bin_job = job;
	/* Clear out the overflow allocation, so we don't
	 * reuse the overflow attached to a previous job.
	 */
	V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0);
	spin_unlock_irqrestore(&v3d->job_lock, irqflags);

	v3d_invalidate_caches(v3d);

	fence = v3d_fence_create(v3d, V3D_BIN);
	if (IS_ERR(fence))
		return NULL;

	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_cl(dev, false, to_v3d_fence(fence)->seqno,
			    job->start, job->end);

	v3d_job_start_stats(&job->base, V3D_BIN);
	v3d_switch_perfmon(v3d, &job->base);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	if (job->qma) {
		V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, job->qma);
		V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, job->qms);
	}
	if (job->qts) {
		V3D_CORE_WRITE(0, V3D_CLE_CT0QTS,
			       V3D_CLE_CT0QTS_ENABLE |
			       job->qts);
	}
	V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start);
	V3D_CORE_WRITE(0, V3D_CLE_CT0QEA, job->end);

	return fence;
}

static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_render_job *job = to_render_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;

	if (unlikely(job->base.base.s_fence->finished.error))
		return NULL;

	v3d->render_job = job;

	/* Can we avoid this flush?  We need to be careful of
	 * scheduling, though -- imagine job0 rendering to a texture and
	 * job1 reading from it, with the jobs executed as bin0, bin1,
	 * render0, render1, so that the cache flush done at job1's bin
	 * time wasn't enough.
	 */
	v3d_invalidate_caches(v3d);

	fence = v3d_fence_create(v3d, V3D_RENDER);
	if (IS_ERR(fence))
		return NULL;

	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_cl(dev, true, to_v3d_fence(fence)->seqno,
			    job->start, job->end);

	v3d_job_start_stats(&job->base, V3D_RENDER);
	v3d_switch_perfmon(v3d, &job->base);

	/* XXX: Set the QCFG */

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start);
	V3D_CORE_WRITE(0, V3D_CLE_CT1QEA, job->end);

	return fence;
}

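/* run_job for the TFU (texture formatting unit): program the TFU
 * registers from the job's arguments; the ICFG write at the end is
 * what kicks off the job.
 */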
static struct dma_fence *
v3d_tfu_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_tfu_job *job = to_tfu_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;

	fence = v3d_fence_create(v3d, V3D_TFU);
	if (IS_ERR(fence))
		return NULL;

	v3d->tfu_job = job;
	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);

	v3d_job_start_stats(&job->base, V3D_TFU);

	V3D_WRITE(V3D_TFU_IIA(v3d->ver), job->args.iia);
	V3D_WRITE(V3D_TFU_IIS(v3d->ver), job->args.iis);
	V3D_WRITE(V3D_TFU_ICA(v3d->ver), job->args.ica);
	V3D_WRITE(V3D_TFU_IUA(v3d->ver), job->args.iua);
	V3D_WRITE(V3D_TFU_IOA(v3d->ver), job->args.ioa);
	if (v3d->ver >= 71)
		V3D_WRITE(V3D_V7_TFU_IOC, job->args.v71.ioc);
	V3D_WRITE(V3D_TFU_IOS(v3d->ver), job->args.ios);
	V3D_WRITE(V3D_TFU_COEF0(v3d->ver), job->args.coef[0]);
	if (v3d->ver >= 71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
		V3D_WRITE(V3D_TFU_COEF1(v3d->ver), job->args.coef[1]);
		V3D_WRITE(V3D_TFU_COEF2(v3d->ver), job->args.coef[2]);
		V3D_WRITE(V3D_TFU_COEF3(v3d->ver), job->args.coef[3]);
	}
	/* ICFG kicks off the job. */
	V3D_WRITE(V3D_TFU_ICFG(v3d->ver), job->args.icfg | V3D_TFU_ICFG_IOC);

	return fence;
}

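/* run_job for the CSD (compute shader dispatch): program the queued
 * CFG registers; the CFG0 write at the end is what kicks off the job.
 */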
static struct dma_fence *
v3d_csd_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_csd_job *job = to_csd_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;
	int i, csd_cfg0_reg, csd_cfg_reg_count;

	v3d->csd_job = job;

	v3d_invalidate_caches(v3d);

	fence = v3d_fence_create(v3d, V3D_CSD);
	if (IS_ERR(fence))
		return NULL;

	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno);

	v3d_job_start_stats(&job->base, V3D_CSD);
	v3d_switch_perfmon(v3d, &job->base);

	csd_cfg0_reg = V3D_CSD_QUEUED_CFG0(v3d->ver);
	csd_cfg_reg_count = v3d->ver < 71 ? 6 : 7;
	for (i = 1; i <= csd_cfg_reg_count; i++)
		V3D_CORE_WRITE(0, csd_cfg0_reg + 4 * i, job->args.cfg[i]);
	/* CFG0 write kicks off the job. */
	V3D_CORE_WRITE(0, csd_cfg0_reg, job->args.cfg[0]);

	return fence;
}

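/* CPU job handler for indirect compute dispatch: read the workgroup
 * counts from the indirect BO and patch the CSD job's configuration
 * (and, where requested, its uniforms) before that job runs.
 */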
static void
v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
{
	struct v3d_indirect_csd_info *indirect_csd = &job->indirect_csd;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect);
	struct drm_v3d_submit_csd *args = &indirect_csd->job->args;
	struct v3d_dev *v3d = job->base.v3d;
	u32 num_batches, *wg_counts;

	v3d_get_bo_vaddr(bo);
	v3d_get_bo_vaddr(indirect);

	wg_counts = (uint32_t *)(bo->vaddr + indirect_csd->offset);

	if (wg_counts[0] == 0 || wg_counts[1] == 0 || wg_counts[2] == 0)
		return;

	args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
	args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
	args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;

	num_batches = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
		      (wg_counts[0] * wg_counts[1] * wg_counts[2]);

	/* V3D 7.1.6 and later don't subtract 1 from the number of batches */
	if (v3d->ver < 71 || (v3d->ver == 71 && v3d->rev < 6))
		args->cfg[4] = num_batches - 1;
	else
		args->cfg[4] = num_batches;

	WARN_ON(args->cfg[4] == ~0);

	for (int i = 0; i < 3; i++) {
		/* 0xffffffff indicates that the uniform rewrite is not needed */
		if (indirect_csd->wg_uniform_offsets[i] != 0xffffffff) {
			u32 uniform_idx = indirect_csd->wg_uniform_offsets[i];
			((uint32_t *)indirect->vaddr)[uniform_idx] = wg_counts[i];
		}
	}

	v3d_put_bo_vaddr(indirect);
	v3d_put_bo_vaddr(bo);
}

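/* CPU job handler for timestamp queries: write the current kernel time
 * for the first query (and zero for the others), and attach the CPU
 * job's done fence to each query's syncobj.
 */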
static void
v3d_timestamp_query(struct v3d_cpu_job *job)
{
	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	u8 *value_addr;

	v3d_get_bo_vaddr(bo);

	for (int i = 0; i < timestamp_query->count; i++) {
		value_addr = ((u8 *)bo->vaddr) + timestamp_query->queries[i].offset;
		*((u64 *)value_addr) = i == 0 ? ktime_get_ns() : 0ull;

		drm_syncobj_replace_fence(timestamp_query->queries[i].syncobj,
					  job->base.done_fence);
	}

	v3d_put_bo_vaddr(bo);
}

static void
v3d_reset_timestamp_queries(struct v3d_cpu_job *job)
{
	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
	struct v3d_timestamp_query *queries = timestamp_query->queries;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	u8 *value_addr;

	v3d_get_bo_vaddr(bo);

	for (int i = 0; i < timestamp_query->count; i++) {
		value_addr = ((u8 *)bo->vaddr) + queries[i].offset;
		*((u64 *)value_addr) = 0;

		drm_syncobj_replace_fence(queries[i].syncobj, NULL);
	}

	v3d_put_bo_vaddr(bo);
}

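/* Write one query result to the destination buffer, as either a 32-bit
 * or a 64-bit value depending on do_64bit.
 */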
static void
write_to_buffer(void *dst, u32 idx, bool do_64bit, u64 value)
{
	if (do_64bit) {
		u64 *dst64 = (u64 *)dst;

		dst64[idx] = value;
	} else {
		u32 *dst32 = (u32 *)dst;

		dst32[idx] = (u32)value;
	}
}

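/* CPU job handler that copies timestamp query results (and optionally
 * an availability bit) from the timestamp BO into the destination BO,
 * one entry per query, advancing by the requested stride.
 */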
static void
v3d_copy_query_results(struct v3d_cpu_job *job)
{
	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
	struct v3d_timestamp_query *queries = timestamp_query->queries;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	struct v3d_bo *timestamp = to_v3d_bo(job->base.bo[1]);
	struct v3d_copy_query_results_info *copy = &job->copy;
	struct dma_fence *fence;
	u8 *query_addr;
	bool available, write_result;
	u8 *data;
	int i;

	v3d_get_bo_vaddr(bo);
	v3d_get_bo_vaddr(timestamp);

	data = ((u8 *)bo->vaddr) + copy->offset;

	for (i = 0; i < timestamp_query->count; i++) {
		fence = drm_syncobj_fence_get(queries[i].syncobj);
		available = fence ? dma_fence_is_signaled(fence) : false;

		write_result = available || copy->do_partial;
		if (write_result) {
			query_addr = ((u8 *)timestamp->vaddr) + queries[i].offset;
			write_to_buffer(data, 0, copy->do_64bit, *((u64 *)query_addr));
		}

		if (copy->availability_bit)
			write_to_buffer(data, 1, copy->do_64bit, available ? 1u : 0u);

		data += copy->stride;

		dma_fence_put(fence);
	}

	v3d_put_bo_vaddr(timestamp);
	v3d_put_bo_vaddr(bo);
}

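/* CPU job handler that resets performance queries: stop and zero the
 * perfmons behind each query and clear each query's syncobj.
 */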
static void
v3d_reset_performance_queries(struct v3d_cpu_job *job)
{
	struct v3d_performance_query_info *performance_query = &job->performance_query;
	struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
	struct v3d_dev *v3d = job->base.v3d;
	struct v3d_perfmon *perfmon;

	for (int i = 0; i < performance_query->count; i++) {
		for (int j = 0; j < performance_query->nperfmons; j++) {
			perfmon = v3d_perfmon_find(v3d_priv,
						   performance_query->queries[i].kperfmon_ids[j]);
			if (!perfmon) {
				DRM_DEBUG("Failed to find perfmon.");
				continue;
			}

			v3d_perfmon_stop(v3d, perfmon, false);

			memset(perfmon->values, 0, perfmon->ncounters * sizeof(u64));

			v3d_perfmon_put(perfmon);
		}

		drm_syncobj_replace_fence(performance_query->queries[i].syncobj, NULL);
	}
}

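/* Gather the results for one performance query: stop each of the
 * query's perfmons, collect their counter values, and write them out
 * to the destination buffer.
 */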
static void
v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, u32 query)
{
	struct v3d_performance_query_info *performance_query = &job->performance_query;
	struct v3d_copy_query_results_info *copy = &job->copy;
	struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
	struct v3d_dev *v3d = job->base.v3d;
	struct v3d_perfmon *perfmon;
	u64 counter_values[V3D_MAX_COUNTERS];

	for (int i = 0; i < performance_query->nperfmons; i++) {
		perfmon = v3d_perfmon_find(v3d_priv,
					   performance_query->queries[query].kperfmon_ids[i]);
		if (!perfmon) {
			DRM_DEBUG("Failed to find perfmon.");
			continue;
		}

		v3d_perfmon_stop(v3d, perfmon, true);

		memcpy(&counter_values[i * DRM_V3D_MAX_PERF_COUNTERS], perfmon->values,
		       perfmon->ncounters * sizeof(u64));

		v3d_perfmon_put(perfmon);
	}

	for (int i = 0; i < performance_query->ncounters; i++)
		write_to_buffer(data, i, copy->do_64bit, counter_values[i]);
}

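/* CPU job handler that copies performance query results (and
 * optionally an availability bit) into the destination BO, one entry
 * per query, advancing by the requested stride.
 */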
static void
v3d_copy_performance_query(struct v3d_cpu_job *job)
{
	struct v3d_performance_query_info *performance_query = &job->performance_query;
	struct v3d_copy_query_results_info *copy = &job->copy;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	struct dma_fence *fence;
	bool available, write_result;
	u8 *data;

	v3d_get_bo_vaddr(bo);

	data = ((u8 *)bo->vaddr) + copy->offset;

	for (int i = 0; i < performance_query->count; i++) {
		fence = drm_syncobj_fence_get(performance_query->queries[i].syncobj);
		available = fence ? dma_fence_is_signaled(fence) : false;

		write_result = available || copy->do_partial;
		if (write_result)
			v3d_write_performance_query_result(job, data, i);

		if (copy->availability_bit)
			write_to_buffer(data, performance_query->ncounters,
					copy->do_64bit, available ? 1u : 0u);

		data += copy->stride;

		dma_fence_put(fence);
	}

	v3d_put_bo_vaddr(bo);
}

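/* Dispatch table mapping each CPU job type to its handler. */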
static const v3d_cpu_job_fn cpu_job_function[] = {
	[V3D_CPU_JOB_TYPE_INDIRECT_CSD] = v3d_rewrite_csd_job_wg_counts_from_indirect,
	[V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = v3d_timestamp_query,
	[V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = v3d_reset_timestamp_queries,
	[V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = v3d_copy_query_results,
	[V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = v3d_reset_performance_queries,
	[V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = v3d_copy_performance_query,
};

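/* CPU jobs are executed synchronously here, so there is no hardware
 * fence to return; NULL is returned once the handler has run.
 */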
static struct dma_fence *
v3d_cpu_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_cpu_job *job = to_cpu_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;

	v3d->cpu_job = job;

	if (job->job_type >= ARRAY_SIZE(cpu_job_function)) {
		DRM_DEBUG_DRIVER("Unknown CPU job: %d\n", job->job_type);
		return NULL;
	}

	v3d_job_start_stats(&job->base, V3D_CPU);
	trace_v3d_cpu_job_begin(&v3d->drm, job->job_type);

	cpu_job_function[job->job_type](job);

	trace_v3d_cpu_job_end(&v3d->drm, job->job_type);
	v3d_job_update_stats(&job->base, V3D_CPU);

	return NULL;
}

static struct dma_fence *
v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_dev *v3d = job->v3d;

	v3d_job_start_stats(job, V3D_CACHE_CLEAN);

	v3d_clean_caches(v3d);

	v3d_job_update_stats(job, V3D_CACHE_CLEAN);

	return NULL;
}

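/* Common timeout handling: stop every queue's scheduler, bump the
 * offending job's karma, reset the GPU, resubmit the pending jobs and
 * restart the schedulers.
 */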
static enum drm_gpu_sched_stat
v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
{
	enum v3d_queue q;

	mutex_lock(&v3d->reset_lock);

	/* block scheduler */
	for (q = 0; q < V3D_MAX_QUEUES; q++)
		drm_sched_stop(&v3d->queue[q].sched, sched_job);

	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* get the GPU back into the init state */
	v3d_reset(v3d);

	for (q = 0; q < V3D_MAX_QUEUES; q++)
		drm_sched_resubmit_jobs(&v3d->queue[q].sched);

	/* Unblock schedulers and restart their jobs. */
	for (q = 0; q < V3D_MAX_QUEUES; q++) {
		drm_sched_start(&v3d->queue[q].sched, true);
	}

	mutex_unlock(&v3d->reset_lock);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}

/* If the current address or return address has changed, then the GPU
 * has probably made progress and we should delay the reset.  This
 * could fail if the GPU got stuck in an infinite loop in the CL, but
 * that is pretty unlikely outside of an i-g-t testcase.
 */
static enum drm_gpu_sched_stat
v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
		    u32 *timedout_ctca, u32 *timedout_ctra)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_dev *v3d = job->v3d;
	u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
	u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));

	if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
		*timedout_ctca = ctca;
		*timedout_ctra = ctra;
		return DRM_GPU_SCHED_STAT_NOMINAL;
	}

	return v3d_gpu_reset_for_timeout(v3d, sched_job);
}

static enum drm_gpu_sched_stat
v3d_bin_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_bin_job *job = to_bin_job(sched_job);

	return v3d_cl_job_timedout(sched_job, V3D_BIN,
				   &job->timedout_ctca, &job->timedout_ctra);
}

static enum drm_gpu_sched_stat
v3d_render_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_render_job *job = to_render_job(sched_job);

	return v3d_cl_job_timedout(sched_job, V3D_RENDER,
				   &job->timedout_ctca, &job->timedout_ctra);
}

static enum drm_gpu_sched_stat
v3d_generic_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);

	return v3d_gpu_reset_for_timeout(job->v3d, sched_job);
}

static enum drm_gpu_sched_stat
v3d_csd_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_csd_job *job = to_csd_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver));

	/* If we've made progress, skip reset and let the timer get
	 * rearmed.
	 */
	if (job->timedout_batches != batches) {
		job->timedout_batches = batches;
		return DRM_GPU_SCHED_STAT_NOMINAL;
	}

	return v3d_gpu_reset_for_timeout(v3d, sched_job);
}

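/* Scheduler backend ops, one set per queue type. */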
static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
	.run_job = v3d_bin_job_run,
	.timedout_job = v3d_bin_job_timedout,
	.free_job = v3d_sched_job_free,
};

static const struct drm_sched_backend_ops v3d_render_sched_ops = {
	.run_job = v3d_render_job_run,
	.timedout_job = v3d_render_job_timedout,
	.free_job = v3d_sched_job_free,
};

static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
	.run_job = v3d_tfu_job_run,
	.timedout_job = v3d_generic_job_timedout,
	.free_job = v3d_sched_job_free,
};

static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
	.run_job = v3d_csd_job_run,
	.timedout_job = v3d_csd_job_timedout,
	.free_job = v3d_sched_job_free
};

static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
	.run_job = v3d_cache_clean_job_run,
	.timedout_job = v3d_generic_job_timedout,
	.free_job = v3d_sched_job_free
};

static const struct drm_sched_backend_ops v3d_cpu_sched_ops = {
	.run_job = v3d_cpu_job_run,
	.timedout_job = v3d_generic_job_timedout,
	.free_job = v3d_cpu_job_free
};

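/* Create one DRM GPU scheduler per queue.  hw_jobs_limit is 1,
 * matching the policy in the DOC comment at the top of this file:
 * only one job is submitted to the hardware at a time.
 */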
int
v3d_sched_init(struct v3d_dev *v3d)
{
	int hw_jobs_limit = 1;
	int job_hang_limit = 0;
	int hang_limit_ms = 500;
	int ret;

	ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
			     &v3d_bin_sched_ops, NULL,
			     DRM_SCHED_PRIORITY_COUNT,
			     hw_jobs_limit, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms), NULL,
			     NULL, "v3d_bin", v3d->drm.dev);
	if (ret)
		return ret;

	ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
			     &v3d_render_sched_ops, NULL,
			     DRM_SCHED_PRIORITY_COUNT,
			     hw_jobs_limit, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms), NULL,
			     NULL, "v3d_render", v3d->drm.dev);
	if (ret)
		goto fail;

	ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
			     &v3d_tfu_sched_ops, NULL,
			     DRM_SCHED_PRIORITY_COUNT,
			     hw_jobs_limit, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms), NULL,
			     NULL, "v3d_tfu", v3d->drm.dev);
	if (ret)
		goto fail;

	if (v3d_has_csd(v3d)) {
		ret = drm_sched_init(&v3d->queue[V3D_CSD].sched,
				     &v3d_csd_sched_ops, NULL,
				     DRM_SCHED_PRIORITY_COUNT,
				     hw_jobs_limit, job_hang_limit,
				     msecs_to_jiffies(hang_limit_ms), NULL,
				     NULL, "v3d_csd", v3d->drm.dev);
		if (ret)
			goto fail;

		ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched,
				     &v3d_cache_clean_sched_ops, NULL,
				     DRM_SCHED_PRIORITY_COUNT,
				     hw_jobs_limit, job_hang_limit,
				     msecs_to_jiffies(hang_limit_ms), NULL,
				     NULL, "v3d_cache_clean", v3d->drm.dev);
		if (ret)
			goto fail;
	}

	ret = drm_sched_init(&v3d->queue[V3D_CPU].sched,
			     &v3d_cpu_sched_ops, NULL,
			     DRM_SCHED_PRIORITY_COUNT,
			     1, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms), NULL,
			     NULL, "v3d_cpu", v3d->drm.dev);
	if (ret)
		goto fail;

	return 0;

fail:
	v3d_sched_fini(v3d);
	return ret;
}

void
v3d_sched_fini(struct v3d_dev *v3d)
{
	enum v3d_queue q;

	for (q = 0; q < V3D_MAX_QUEUES; q++) {
		if (v3d->queue[q].sched.ready)
			drm_sched_fini(&v3d->queue[q].sched);
	}
}