// SPDX-License-Identifier: MIT

#include <linux/slab.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_syncobj.h>

#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_dma.h"
#include "nouveau_exec.h"
#include "nouveau_abi16.h"
#include "nouveau_sched.h"

#define NOUVEAU_SCHED_JOB_TIMEOUT_MS		10000

/* Starts at 0, since the DRM scheduler interprets these values as (initial)
 * indices into the run-queue array.
 */
enum nouveau_sched_priority {
	NOUVEAU_SCHED_PRIORITY_SINGLE = DRM_SCHED_PRIORITY_KERNEL,
	NOUVEAU_SCHED_PRIORITY_COUNT,
};

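/* Initialize a generic nouveau_job from the given job arguments.
 *
 * Duplicates the caller's in/out sync arrays, allocates the syncobj and fence
 * chain bookkeeping for the out syncs and initializes the underlying
 * drm_sched_job against the scheduler's entity. Synchronous jobs must not
 * carry any sync items.
 */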
int
nouveau_job_init(struct nouveau_job *job,
		 struct nouveau_job_args *args)
{
	struct nouveau_sched *sched = args->sched;
	int ret;

	INIT_LIST_HEAD(&job->entry);

	job->file_priv = args->file_priv;
	job->cli = nouveau_cli(args->file_priv);
	job->sched = sched;

	job->sync = args->sync;
	job->resv_usage = args->resv_usage;

	job->ops = args->ops;

	job->in_sync.count = args->in_sync.count;
	if (job->in_sync.count) {
		if (job->sync)
			return -EINVAL;

		job->in_sync.data = kmemdup(args->in_sync.s,
					 sizeof(*args->in_sync.s) *
					 args->in_sync.count,
					 GFP_KERNEL);
		if (!job->in_sync.data)
			return -ENOMEM;
	}

	job->out_sync.count = args->out_sync.count;
	if (job->out_sync.count) {
		if (job->sync) {
			ret = -EINVAL;
			goto err_free_in_sync;
		}

		job->out_sync.data = kmemdup(args->out_sync.s,
					  sizeof(*args->out_sync.s) *
					  args->out_sync.count,
					  GFP_KERNEL);
		if (!job->out_sync.data) {
			ret = -ENOMEM;
			goto err_free_in_sync;
		}

		job->out_sync.objs = kcalloc(job->out_sync.count,
					     sizeof(*job->out_sync.objs),
					     GFP_KERNEL);
		if (!job->out_sync.objs) {
			ret = -ENOMEM;
			goto err_free_out_sync;
		}

		job->out_sync.chains = kcalloc(job->out_sync.count,
					       sizeof(*job->out_sync.chains),
					       GFP_KERNEL);
		if (!job->out_sync.chains) {
			ret = -ENOMEM;
			goto err_free_objs;
		}
	}

	ret = drm_sched_job_init(&job->base, &sched->entity,
				 args->credits, NULL);
	if (ret)
		goto err_free_chains;

	job->state = NOUVEAU_JOB_INITIALIZED;

	return 0;

err_free_chains:
	kfree(job->out_sync.chains);
err_free_objs:
	kfree(job->out_sync.objs);
err_free_out_sync:
	kfree(job->out_sync.data);
err_free_in_sync:
	kfree(job->in_sync.data);
	return ret;
}

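/* Release a job's resources; called from the scheduler's free_job path via
 * nouveau_sched_free_job(). Drops the done fence, cleans up the drm_sched_job
 * and hands the job back to the backend's free() callback.
 */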
void
nouveau_job_fini(struct nouveau_job *job)
{
	dma_fence_put(job->done_fence);
	drm_sched_job_cleanup(&job->base);

	job->ops->free(job);
}

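/* Remove a finished job from the scheduler's job list and wake up waiters;
 * nouveau_sched_fini() relies on this to drain the list before teardown.
 */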
void
nouveau_job_done(struct nouveau_job *job)
{
	struct nouveau_sched *sched = job->sched;

	spin_lock(&sched->job.list.lock);
	list_del(&job->entry);
	spin_unlock(&sched->job.list.lock);

	wake_up(&sched->job.wq);
}

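/* Free the memory duplicated and allocated by nouveau_job_init(). */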
void
nouveau_job_free(struct nouveau_job *job)
{
	kfree(job->in_sync.data);
	kfree(job->out_sync.data);
	kfree(job->out_sync.objs);
	kfree(job->out_sync.chains);
}

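/* Resolve a struct drm_nouveau_sync item to a dma_fence. Binary and timeline
 * syncobjs are supported; for timeline syncobjs the fence of the given
 * timeline point is looked up.
 */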
static int
sync_find_fence(struct nouveau_job *job,
		struct drm_nouveau_sync *sync,
		struct dma_fence **fence)
{
	u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
	u64 point = 0;
	int ret;

	if (stype != DRM_NOUVEAU_SYNC_SYNCOBJ &&
	    stype != DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ)
		return -EOPNOTSUPP;

	if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ)
		point = sync->timeline_value;

	ret = drm_syncobj_find_fence(job->file_priv,
				     sync->handle, point,
				     0 /* flags */, fence);
	if (ret)
		return ret;

	return 0;
}

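/* Register each in-sync fence as a dependency of the job, such that the
 * scheduler does not run the job before all of them have signaled.
 */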
static int
nouveau_job_add_deps(struct nouveau_job *job)
{
	struct dma_fence *in_fence = NULL;
	int ret, i;

	for (i = 0; i < job->in_sync.count; i++) {
		struct drm_nouveau_sync *sync = &job->in_sync.data[i];

		ret = sync_find_fence(job, sync, &in_fence);
		if (ret) {
			NV_PRINTK(warn, job->cli,
				  "Failed to find syncobj (-> in): handle=%d\n",
				  sync->handle);
			return ret;
		}

		ret = drm_sched_job_add_dependency(&job->base, in_fence);
		if (ret)
			return ret;
	}

	return 0;
}

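/* Drop the syncobj references and fence chains acquired by
 * nouveau_job_fence_attach_prepare().
 */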
static void
nouveau_job_fence_attach_cleanup(struct nouveau_job *job)
{
	int i;

	for (i = 0; i < job->out_sync.count; i++) {
		struct drm_syncobj *obj = job->out_sync.objs[i];
		struct dma_fence_chain *chain = job->out_sync.chains[i];

		if (obj)
			drm_syncobj_put(obj);

		if (chain)
			dma_fence_chain_free(chain);
	}
}

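/* Look up each out-sync syncobj and pre-allocate a fence chain for timeline
 * syncobjs, such that nouveau_job_fence_attach() cannot fail anymore once the
 * job has been armed.
 */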
static int
nouveau_job_fence_attach_prepare(struct nouveau_job *job)
{
	int i, ret;

	for (i = 0; i < job->out_sync.count; i++) {
		struct drm_nouveau_sync *sync = &job->out_sync.data[i];
		struct drm_syncobj **pobj = &job->out_sync.objs[i];
		struct dma_fence_chain **pchain = &job->out_sync.chains[i];
		u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;

		if (stype != DRM_NOUVEAU_SYNC_SYNCOBJ &&
		    stype != DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
			ret = -EINVAL;
			goto err_sync_cleanup;
		}

		*pobj = drm_syncobj_find(job->file_priv, sync->handle);
		if (!*pobj) {
			NV_PRINTK(warn, job->cli,
				  "Failed to find syncobj (-> out): handle=%d\n",
				  sync->handle);
			ret = -ENOENT;
			goto err_sync_cleanup;
		}

		if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
			*pchain = dma_fence_chain_alloc();
			if (!*pchain) {
				ret = -ENOMEM;
				goto err_sync_cleanup;
			}
		}
	}

	return 0;

err_sync_cleanup:
	nouveau_job_fence_attach_cleanup(job);
	return ret;
}

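/* Publish the job's done fence to each out-sync syncobj: as a new chain point
 * for timeline syncobjs, by replacing the syncobj's fence otherwise. Consumes
 * the references and chains taken in nouveau_job_fence_attach_prepare().
 */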
static void
nouveau_job_fence_attach(struct nouveau_job *job)
{
	struct dma_fence *fence = job->done_fence;
	int i;

	for (i = 0; i < job->out_sync.count; i++) {
		struct drm_nouveau_sync *sync = &job->out_sync.data[i];
		struct drm_syncobj **pobj = &job->out_sync.objs[i];
		struct dma_fence_chain **pchain = &job->out_sync.chains[i];
		u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;

		if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
			drm_syncobj_add_point(*pobj, *pchain, fence,
					      sync->timeline_value);
		} else {
			drm_syncobj_replace_fence(*pobj, fence);
		}

		drm_syncobj_put(*pobj);
		*pobj = NULL;
		*pchain = NULL;
	}
}

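/* Submit a job to its scheduler entity.
 *
 * Dependencies are resolved and out-fence attachment is prepared up front,
 * since both may fail. The backend's submit() callback, adding the job to the
 * job list and arming/pushing the drm_sched_job happen under sched->mutex to
 * preserve submission order. For synchronous jobs, the done fence is waited
 * for before returning.
 */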
int
nouveau_job_submit(struct nouveau_job *job)
{
	struct nouveau_sched *sched = job->sched;
	struct dma_fence *done_fence = NULL;
	struct drm_gpuvm_exec vm_exec = {
		.vm = &nouveau_cli_uvmm(job->cli)->base,
		.flags = DRM_EXEC_IGNORE_DUPLICATES,
		.num_fences = 1,
	};
	int ret;

	ret = nouveau_job_add_deps(job);
	if (ret)
		goto err;

	ret = nouveau_job_fence_attach_prepare(job);
	if (ret)
		goto err;

	/* Make sure the job appears on the sched_entity's queue in the same
	 * order as it was submitted.
	 */
	mutex_lock(&sched->mutex);

	/* Guarantee we won't fail after the submit() callback returned
	 * successfully.
	 */
	if (job->ops->submit) {
		ret = job->ops->submit(job, &vm_exec);
		if (ret)
			goto err_cleanup;
	}

	/* Submit was successful; add the job to the scheduler's job list. */
	spin_lock(&sched->job.list.lock);
	list_add(&job->entry, &sched->job.list.head);
	spin_unlock(&sched->job.list.lock);

	drm_sched_job_arm(&job->base);
	job->done_fence = dma_fence_get(&job->base.s_fence->finished);
	if (job->sync)
		done_fence = dma_fence_get(job->done_fence);

	if (job->ops->armed_submit)
		job->ops->armed_submit(job, &vm_exec);

	nouveau_job_fence_attach(job);

	/* Set job state before pushing the job to the scheduler,
	 * such that we do not overwrite the job state set in run().
	 */
	job->state = NOUVEAU_JOB_SUBMIT_SUCCESS;

	drm_sched_entity_push_job(&job->base);

	mutex_unlock(&sched->mutex);

	if (done_fence) {
		dma_fence_wait(done_fence, true);
		dma_fence_put(done_fence);
	}

	return 0;

err_cleanup:
	mutex_unlock(&sched->mutex);
	nouveau_job_fence_attach_cleanup(job);
err:
	job->state = NOUVEAU_JOB_SUBMIT_FAILED;
	return ret;
}

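/* Run the job through the backend's run() callback and record the result in
 * the job state.
 */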
static struct dma_fence *
nouveau_job_run(struct nouveau_job *job)
{
	struct dma_fence *fence;

	fence = job->ops->run(job);
	if (IS_ERR(fence))
		job->state = NOUVEAU_JOB_RUN_FAILED;
	else
		job->state = NOUVEAU_JOB_RUN_SUCCESS;

	return fence;
}

static struct dma_fence *
nouveau_sched_run_job(struct drm_sched_job *sched_job)
{
	struct nouveau_job *job = to_nouveau_job(sched_job);

	return nouveau_job_run(job);
}

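/* Scheduler timeout handler: stop the scheduler, give the backend's timeout()
 * callback (if any) a chance to handle the hang and determine the returned
 * status, then restart the scheduler.
 */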
static enum drm_gpu_sched_stat
nouveau_sched_timedout_job(struct drm_sched_job *sched_job)
{
	struct drm_gpu_scheduler *sched = sched_job->sched;
	struct nouveau_job *job = to_nouveau_job(sched_job);
	enum drm_gpu_sched_stat stat = DRM_GPU_SCHED_STAT_NOMINAL;

	drm_sched_stop(sched, sched_job);

	if (job->ops->timeout)
		stat = job->ops->timeout(job);
	else
		NV_PRINTK(warn, job->cli, "Generic job timeout.\n");

	drm_sched_start(sched);

	return stat;
}

static void
nouveau_sched_free_job(struct drm_sched_job *sched_job)
{
	struct nouveau_job *job = to_nouveau_job(sched_job);

	nouveau_job_fini(job);
}

static const struct drm_sched_backend_ops nouveau_sched_ops = {
	.run_job = nouveau_sched_run_job,
	.timedout_job = nouveau_sched_timedout_job,
	.free_job = nouveau_sched_free_job,
};

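/* Initialize a nouveau_sched instance: set up the drm_gpu_scheduler with a
 * single run-queue, its entity and the job list. If no workqueue is passed in,
 * a dedicated one is allocated and destroyed again in nouveau_sched_fini().
 */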
static int
nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
		   struct workqueue_struct *wq, u32 credit_limit)
{
	struct drm_gpu_scheduler *drm_sched = &sched->base;
	struct drm_sched_entity *entity = &sched->entity;
	const long timeout = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
	int ret;

	if (!wq) {
		wq = alloc_workqueue("nouveau_sched_wq_%d", 0, WQ_MAX_ACTIVE,
				     current->pid);
		if (!wq)
			return -ENOMEM;

		sched->wq = wq;
	}

	ret = drm_sched_init(drm_sched, &nouveau_sched_ops, wq,
			     NOUVEAU_SCHED_PRIORITY_COUNT,
			     credit_limit, 0, timeout,
			     NULL, NULL, "nouveau_sched", drm->dev->dev);
	if (ret)
		goto fail_wq;

	/* Use DRM_SCHED_PRIORITY_KERNEL, since that's what we're required to use
	 * when we want a single run-queue only.
	 *
	 * It's not documented, but using any other priority runs into faults,
	 * because the scheduler uses the priority as an array index.
	 *
	 * NOUVEAU_SCHED_PRIORITY_SINGLE can't be used either, because it doesn't
	 * match the enum type used in drm_sched_entity_init().
	 */
	ret = drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_KERNEL,
				    &drm_sched, 1, NULL);
	if (ret)
		goto fail_sched;

	mutex_init(&sched->mutex);
	spin_lock_init(&sched->job.list.lock);
	INIT_LIST_HEAD(&sched->job.list.head);
	init_waitqueue_head(&sched->job.wq);

	return 0;

fail_sched:
	drm_sched_fini(drm_sched);
fail_wq:
	if (sched->wq)
		destroy_workqueue(sched->wq);
	return ret;
}

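/* Allocate and initialize a nouveau_sched instance; the allocation is freed
 * again if initialization fails.
 */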
int
nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
		     struct workqueue_struct *wq, u32 credit_limit)
{
	struct nouveau_sched *sched;
	int ret;

	sched = kzalloc(sizeof(*sched), GFP_KERNEL);
	if (!sched)
		return -ENOMEM;

	ret = nouveau_sched_init(sched, drm, wq, credit_limit);
	if (ret) {
		kfree(sched);
		return ret;
	}

	*psched = sched;

	return 0;
}

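/* Tear down a nouveau_sched instance: wait until the job list is drained, then
 * clean up the entity and the scheduler and, if owned by this instance, destroy
 * the workqueue.
 */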
static void
nouveau_sched_fini(struct nouveau_sched *sched)
{
	struct drm_gpu_scheduler *drm_sched = &sched->base;
	struct drm_sched_entity *entity = &sched->entity;

	rmb(); /* for list_empty to work without lock */
	wait_event(sched->job.wq, list_empty(&sched->job.list.head));

	drm_sched_entity_fini(entity);
	drm_sched_fini(drm_sched);

	/* Destroy workqueue after scheduler tear down, otherwise it might still
	 * be in use.
	 */
	if (sched->wq)
		destroy_workqueue(sched->wq);
}

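/* Counterpart of nouveau_sched_create(): tear down and free the scheduler
 * instance and clear the caller's pointer.
 */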
void
nouveau_sched_destroy(struct nouveau_sched **psched)
{
	struct nouveau_sched *sched = *psched;

	nouveau_sched_fini(sched);
	kfree(sched);

	*psched = NULL;
}