// SPDX-License-Identifier: MIT

#include <linux/slab.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_syncobj.h>

#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_dma.h"
#include "nouveau_exec.h"
#include "nouveau_abi16.h"
#include "nouveau_sched.h"
#include "nouveau_chan.h"

#define NOUVEAU_SCHED_JOB_TIMEOUT_MS 10000

/* Starts at 0, since the DRM scheduler interprets these values as an (initial)
 * index into the run-queue array.
 */
enum nouveau_sched_priority {
	NOUVEAU_SCHED_PRIORITY_SINGLE = DRM_SCHED_PRIORITY_KERNEL,
	NOUVEAU_SCHED_PRIORITY_COUNT,
};

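/* Initialize a job from the given arguments.
 *
 * The user-provided in/out sync arrays (args->in_sync.s, args->out_sync.s)
 * are duplicated, and arrays for the resolved out-sync syncobjs and their
 * fence chains are allocated up front; the actual syncobj lookup and chain
 * allocation happen in nouveau_job_fence_attach_prepare().
 *
 * Synchronous jobs (args->sync) must not carry any in- or out-syncs; such a
 * combination is rejected with -EINVAL.
 */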
int
nouveau_job_init(struct nouveau_job *job,
		 struct nouveau_job_args *args)
{
	struct nouveau_sched *sched = args->sched;
	int ret;

	INIT_LIST_HEAD(&job->entry);

	job->file_priv = args->file_priv;
	job->cli = nouveau_cli(args->file_priv);
	job->sched = sched;

	job->sync = args->sync;
	job->resv_usage = args->resv_usage;

	job->ops = args->ops;

	job->in_sync.count = args->in_sync.count;
	if (job->in_sync.count) {
		if (job->sync)
			return -EINVAL;

		job->in_sync.data = kmemdup(args->in_sync.s,
					    sizeof(*args->in_sync.s) *
					    args->in_sync.count,
					    GFP_KERNEL);
		if (!job->in_sync.data)
			return -ENOMEM;
	}

	job->out_sync.count = args->out_sync.count;
	if (job->out_sync.count) {
		if (job->sync) {
			ret = -EINVAL;
			goto err_free_in_sync;
		}

		job->out_sync.data = kmemdup(args->out_sync.s,
					     sizeof(*args->out_sync.s) *
					     args->out_sync.count,
					     GFP_KERNEL);
		if (!job->out_sync.data) {
			ret = -ENOMEM;
			goto err_free_in_sync;
		}

		job->out_sync.objs = kcalloc(job->out_sync.count,
					     sizeof(*job->out_sync.objs),
					     GFP_KERNEL);
		if (!job->out_sync.objs) {
			ret = -ENOMEM;
			goto err_free_out_sync;
		}

		job->out_sync.chains = kcalloc(job->out_sync.count,
					       sizeof(*job->out_sync.chains),
					       GFP_KERNEL);
		if (!job->out_sync.chains) {
			ret = -ENOMEM;
			goto err_free_objs;
		}
	}

	ret = drm_sched_job_init(&job->base, &sched->entity,
				 args->credits, NULL,
				 job->file_priv->client_id);
	if (ret)
		goto err_free_chains;

	job->state = NOUVEAU_JOB_INITIALIZED;

	return 0;

err_free_chains:
	kfree(job->out_sync.chains);
err_free_objs:
	kfree(job->out_sync.objs);
err_free_out_sync:
	kfree(job->out_sync.data);
err_free_in_sync:
	kfree(job->in_sync.data);
	return ret;
}

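/* Counterpart of nouveau_job_init(): drop the done_fence reference, clean up
 * the embedded drm_sched_job and hand the job back to its owner via
 * ops->free().
 */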
void
nouveau_job_fini(struct nouveau_job *job)
{
	dma_fence_put(job->done_fence);
	drm_sched_job_cleanup(&job->base);

	job->ops->free(job);
}

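/* Remove the job from the scheduler's job list once it is done. */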
void
nouveau_job_done(struct nouveau_job *job)
{
	struct nouveau_sched *sched = job->sched;

	spin_lock(&sched->job_list.lock);
	list_del(&job->entry);
	spin_unlock(&sched->job_list.lock);
}

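/* Free the in/out sync arrays duplicated and allocated in nouveau_job_init(). */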
void
nouveau_job_free(struct nouveau_job *job)
{
	kfree(job->in_sync.data);
	kfree(job->out_sync.data);
	kfree(job->out_sync.objs);
	kfree(job->out_sync.chains);
}

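/* Resolve a struct drm_nouveau_sync into the dma_fence currently backing it.
 * Plain syncobjs are looked up at point 0, timeline syncobjs at the given
 * timeline value. Any other sync type yields -EOPNOTSUPP.
 */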
static int
sync_find_fence(struct nouveau_job *job,
		struct drm_nouveau_sync *sync,
		struct dma_fence **fence)
{
	u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
	u64 point = 0;
	int ret;

	if (stype != DRM_NOUVEAU_SYNC_SYNCOBJ &&
	    stype != DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ)
		return -EOPNOTSUPP;

	if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ)
		point = sync->timeline_value;

	ret = drm_syncobj_find_fence(job->file_priv,
				     sync->handle, point,
				     0 /* flags */, fence);
	if (ret)
		return ret;

	return 0;
}

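/* Register each of the job's in-sync fences as a scheduler dependency, such
 * that the job isn't run before all of them have signaled.
 */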
static int
nouveau_job_add_deps(struct nouveau_job *job)
{
	struct dma_fence *in_fence = NULL;
	int ret, i;

	for (i = 0; i < job->in_sync.count; i++) {
		struct drm_nouveau_sync *sync = &job->in_sync.data[i];

		ret = sync_find_fence(job, sync, &in_fence);
		if (ret) {
			NV_PRINTK(warn, job->cli,
				  "Failed to find syncobj (-> in): handle=%d\n",
				  sync->handle);
			return ret;
		}

		ret = drm_sched_job_add_dependency(&job->base, in_fence);
		if (ret)
			return ret;
	}

	return 0;
}

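/* Undo nouveau_job_fence_attach_prepare(): drop the syncobj references and
 * free any fence chains that were preallocated but not consumed.
 */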
static void
nouveau_job_fence_attach_cleanup(struct nouveau_job *job)
{
	int i;

	for (i = 0; i < job->out_sync.count; i++) {
		struct drm_syncobj *obj = job->out_sync.objs[i];
		struct dma_fence_chain *chain = job->out_sync.chains[i];

		if (obj)
			drm_syncobj_put(obj);

		if (chain)
			dma_fence_chain_free(chain);
	}
}

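/* Look up the syncobj behind each out-sync and, for timeline syncobjs,
 * preallocate a dma_fence_chain. Doing the lookups and allocations here
 * allows nouveau_job_fence_attach() itself to never fail.
 */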
static int
nouveau_job_fence_attach_prepare(struct nouveau_job *job)
{
	int i, ret;

	for (i = 0; i < job->out_sync.count; i++) {
		struct drm_nouveau_sync *sync = &job->out_sync.data[i];
		struct drm_syncobj **pobj = &job->out_sync.objs[i];
		struct dma_fence_chain **pchain = &job->out_sync.chains[i];
		u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;

		if (stype != DRM_NOUVEAU_SYNC_SYNCOBJ &&
		    stype != DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
			ret = -EINVAL;
			goto err_sync_cleanup;
		}

		*pobj = drm_syncobj_find(job->file_priv, sync->handle);
		if (!*pobj) {
			NV_PRINTK(warn, job->cli,
				  "Failed to find syncobj (-> out): handle=%d\n",
				  sync->handle);
			ret = -ENOENT;
			goto err_sync_cleanup;
		}

		if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
			*pchain = dma_fence_chain_alloc();
			if (!*pchain) {
				ret = -ENOMEM;
				goto err_sync_cleanup;
			}
		}
	}

	return 0;

err_sync_cleanup:
	nouveau_job_fence_attach_cleanup(job);
	return ret;
}

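/* Attach the job's done_fence to every out-sync: timeline syncobjs get a new
 * chain point at the requested timeline value, plain syncobjs have their
 * fence replaced. Consumes the references and chains taken in
 * nouveau_job_fence_attach_prepare(), hence it cannot fail.
 */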
static void
nouveau_job_fence_attach(struct nouveau_job *job)
{
	struct dma_fence *fence = job->done_fence;
	int i;

	for (i = 0; i < job->out_sync.count; i++) {
		struct drm_nouveau_sync *sync = &job->out_sync.data[i];
		struct drm_syncobj **pobj = &job->out_sync.objs[i];
		struct dma_fence_chain **pchain = &job->out_sync.chains[i];
		u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;

		if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
			drm_syncobj_add_point(*pobj, *pchain, fence,
					      sync->timeline_value);
		} else {
			drm_syncobj_replace_fence(*pobj, fence);
		}

		drm_syncobj_put(*pobj);
		*pobj = NULL;
		*pchain = NULL;
	}
}

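/* Submit a previously initialized job.
 *
 * Illustrative per-job flow, based only on the functions in this file
 * (error handling trimmed):
 *
 *	ret = nouveau_job_init(job, &args);
 *	if (!ret)
 *		ret = nouveau_job_submit(job);
 *
 * In-sync dependencies are added and the out-sync attachment is prepared
 * before ops->submit() is called, so nothing can fail anymore once
 * ops->submit() returned successfully. sched->mutex serializes submissions,
 * such that jobs appear on the entity's queue in submission order. For
 * synchronous jobs (job->sync), this function also waits for the job's
 * done_fence before returning.
 */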
int
nouveau_job_submit(struct nouveau_job *job)
{
	struct nouveau_sched *sched = job->sched;
	struct dma_fence *done_fence = NULL;
	struct drm_gpuvm_exec vm_exec = {
		.vm = &nouveau_cli_uvmm(job->cli)->base,
		.flags = DRM_EXEC_IGNORE_DUPLICATES,
		.num_fences = 1,
	};
	int ret;

	ret = nouveau_job_add_deps(job);
	if (ret)
		goto err;

	ret = nouveau_job_fence_attach_prepare(job);
	if (ret)
		goto err;

	/* Make sure the job appears on the sched_entity's queue in the same
	 * order as it was submitted.
	 */
	mutex_lock(&sched->mutex);

	/* Guarantee we won't fail after the submit() callback returned
	 * successfully.
	 */
	if (job->ops->submit) {
		ret = job->ops->submit(job, &vm_exec);
		if (ret)
			goto err_cleanup;
	}

	/* Submit was successful; add the job to the scheduler's job list. */
	spin_lock(&sched->job_list.lock);
	list_add(&job->entry, &sched->job_list.head);
	spin_unlock(&sched->job_list.lock);

	drm_sched_job_arm(&job->base);
	job->done_fence = dma_fence_get(&job->base.s_fence->finished);
	if (job->sync)
		done_fence = dma_fence_get(job->done_fence);

	if (job->ops->armed_submit)
		job->ops->armed_submit(job, &vm_exec);

	nouveau_job_fence_attach(job);

	/* Set job state before pushing the job to the scheduler,
	 * such that we do not overwrite the job state set in run().
	 */
	job->state = NOUVEAU_JOB_SUBMIT_SUCCESS;

	drm_sched_entity_push_job(&job->base);

	mutex_unlock(&sched->mutex);

	if (done_fence) {
		dma_fence_wait(done_fence, true);
		dma_fence_put(done_fence);
	}

	return 0;

err_cleanup:
	mutex_unlock(&sched->mutex);
	nouveau_job_fence_attach_cleanup(job);
err:
	job->state = NOUVEAU_JOB_SUBMIT_FAILED;
	return ret;
}

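/* Run the job through its ops->run() callback and record in job->state
 * whether that succeeded.
 */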
static struct dma_fence *
nouveau_job_run(struct nouveau_job *job)
{
	struct dma_fence *fence;

	fence = job->ops->run(job);
	if (IS_ERR(fence))
		job->state = NOUVEAU_JOB_RUN_FAILED;
	else
		job->state = NOUVEAU_JOB_RUN_SUCCESS;

	return fence;
}

static struct dma_fence *
nouveau_sched_run_job(struct drm_sched_job *sched_job)
{
	struct nouveau_job *job = to_nouveau_job(sched_job);

	return nouveau_job_run(job);
}

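/* drm_sched timeout handling: stop the scheduler, give the job's backend a
 * chance to handle the timeout through ops->timeout(), then restart the
 * scheduler. Without an ops->timeout() callback only a warning is printed
 * and the default DRM_GPU_SCHED_STAT_RESET is returned.
 */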
static enum drm_gpu_sched_stat
nouveau_sched_timedout_job(struct drm_sched_job *sched_job)
{
	struct drm_gpu_scheduler *sched = sched_job->sched;
	struct nouveau_job *job = to_nouveau_job(sched_job);
	enum drm_gpu_sched_stat stat = DRM_GPU_SCHED_STAT_RESET;

	drm_sched_stop(sched, sched_job);

	if (job->ops->timeout)
		stat = job->ops->timeout(job);
	else
		NV_PRINTK(warn, job->cli, "Generic job timeout.\n");

	drm_sched_start(sched, 0);

	return stat;
}

static void
nouveau_sched_free_job(struct drm_sched_job *sched_job)
{
	struct nouveau_job *job = to_nouveau_job(sched_job);

	nouveau_job_fini(job);
}

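/* Called by the DRM scheduler for jobs that will never run (e.g. on
 * teardown); cancels the job's done fence via nouveau_fence_cancel().
 */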
static void
nouveau_sched_cancel_job(struct drm_sched_job *sched_job)
{
	struct nouveau_fence *fence;
	struct nouveau_job *job;

	job = to_nouveau_job(sched_job);
	fence = to_nouveau_fence(job->done_fence);

	nouveau_fence_cancel(fence);
}

static const struct drm_sched_backend_ops nouveau_sched_ops = {
	.run_job = nouveau_sched_run_job,
	.timedout_job = nouveau_sched_timedout_job,
	.free_job = nouveau_sched_free_job,
	.cancel_job = nouveau_sched_cancel_job,
};

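/* Initialize the DRM GPU scheduler and its single entity for this
 * nouveau_sched instance. If no workqueue is passed in, a dedicated one is
 * allocated and remembered in sched->wq, so nouveau_sched_fini() can destroy
 * it again.
 */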
static int
nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
		   struct workqueue_struct *wq, u32 credit_limit)
{
	struct drm_gpu_scheduler *drm_sched = &sched->base;
	struct drm_sched_entity *entity = &sched->entity;
	struct drm_sched_init_args args = {
		.ops = &nouveau_sched_ops,
		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
		.credit_limit = credit_limit,
		.timeout = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS),
		.name = "nouveau_sched",
		.dev = drm->dev->dev
	};
	int ret;

	if (!wq) {
		wq = alloc_workqueue("nouveau_sched_wq_%d", 0, WQ_MAX_ACTIVE,
				     current->pid);
		if (!wq)
			return -ENOMEM;

		sched->wq = wq;
	}

	args.submit_wq = wq;

	ret = drm_sched_init(drm_sched, &args);
	if (ret)
		goto fail_wq;

	/* Using DRM_SCHED_PRIORITY_KERNEL, since that's what we're required to
	 * use when we want to have a single run-queue only.
	 *
	 * It's not documented, but using any other priority runs into faults,
	 * because the scheduler uses the priority as an array index.
	 *
	 * Can't use NOUVEAU_SCHED_PRIORITY_SINGLE either, because it doesn't
	 * match the enum type used in drm_sched_entity_init().
	 */
	ret = drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_KERNEL,
				    &drm_sched, 1, NULL);
	if (ret)
		goto fail_sched;

	mutex_init(&sched->mutex);
	spin_lock_init(&sched->job_list.lock);
	INIT_LIST_HEAD(&sched->job_list.head);

	return 0;

fail_sched:
	drm_sched_fini(drm_sched);
fail_wq:
	if (sched->wq)
		destroy_workqueue(sched->wq);
	return ret;
}

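/* Allocate and initialize a nouveau_sched instance.
 *
 * Rough usage sketch, based only on the functions in this file (error
 * handling trimmed):
 *
 *	struct nouveau_sched *sched;
 *
 *	ret = nouveau_sched_create(&sched, drm, NULL, credit_limit);
 *	...
 *	nouveau_sched_destroy(&sched);
 *
 * Passing wq == NULL makes nouveau_sched_init() allocate a dedicated
 * workqueue, which is destroyed again in nouveau_sched_fini().
 */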
int
nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
		     struct workqueue_struct *wq, u32 credit_limit)
{
	struct nouveau_sched *sched;
	int ret;

	sched = kzalloc(sizeof(*sched), GFP_KERNEL);
	if (!sched)
		return -ENOMEM;

	ret = nouveau_sched_init(sched, drm, wq, credit_limit);
	if (ret) {
		kfree(sched);
		return ret;
	}

	*psched = sched;

	return 0;
}

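/* Tear down the entity and scheduler set up in nouveau_sched_init() and, if
 * one was allocated there, destroy the driver-owned workqueue.
 */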
static void
nouveau_sched_fini(struct nouveau_sched *sched)
{
	struct drm_gpu_scheduler *drm_sched = &sched->base;
	struct drm_sched_entity *entity = &sched->entity;

	drm_sched_entity_fini(entity);
	drm_sched_fini(drm_sched);

	/* Destroy workqueue after scheduler tear down, otherwise it might still
	 * be in use.
	 */
	if (sched->wq)
		destroy_workqueue(sched->wq);
}

void
nouveau_sched_destroy(struct nouveau_sched **psched)
{
	struct nouveau_sched *sched = *psched;

	nouveau_sched_fini(sched);
	kfree(sched);

	*psched = NULL;
}