1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 /**
25 * DOC: Overview
26 *
27 * The GPU scheduler provides entities which allow userspace to push jobs
28 * into software queues which are then scheduled on a hardware run queue.
29 * The software queues have a priority among them. The scheduler selects the entities
30 * from the run queue using a FIFO. The scheduler provides dependency handling
31 * features among jobs. The driver is supposed to provide callback functions for
32 * backend operations to the scheduler, like submitting a job to the hardware run
33 * queue, returning the dependencies of a job, etc.
34 *
35 * The organisation of the scheduler is the following:
36 *
37 * 1. Each hw run queue has one scheduler
38 * 2. Each scheduler has multiple run queues with different priorities
39 * (e.g., HIGH_HW,HIGH_SW, KERNEL, NORMAL)
40 * 3. Each scheduler run queue has a queue of entities to schedule
41 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
42 * the hardware.
43 *
44 * The jobs in an entity are always scheduled in the order in which they were pushed.
45 *
46 * Note that once a job has been taken from the entity's queue and pushed to the
47 * hardware, i.e. the pending queue, the entity must not be referenced anymore
48 * through the job's entity pointer.
49 */
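
/*
 * For illustration only (not part of the scheduler itself): a minimal sketch
 * of the backend callbacks a driver provides, assuming hypothetical foo_*
 * helpers and a struct foo_job wrapping &struct drm_sched_job. The real
 * contract is documented in &struct drm_sched_backend_ops.
 *
 *	static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
 *	{
 *		struct foo_job *job = to_foo_job(sched_job);
 *
 *		// Push the job to the hardware run queue and return the HW
 *		// fence that signals once the hardware has finished it.
 *		return foo_hw_submit(job);
 *	}
 *
 *	static enum drm_gpu_sched_stat foo_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		// Reset recovery goes here, typically built around
 *		// drm_sched_stop() and drm_sched_start().
 *		return DRM_GPU_SCHED_STAT_RESET;
 *	}
 *
 *	static void foo_free_job(struct drm_sched_job *sched_job)
 *	{
 *		drm_sched_job_cleanup(sched_job);
 *		kfree(to_foo_job(sched_job));
 *	}
 *
 *	static const struct drm_sched_backend_ops foo_sched_ops = {
 *		.run_job	= foo_run_job,
 *		.timedout_job	= foo_timedout_job,
 *		.free_job	= foo_free_job,
 *	};
 */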
50
51 /**
52 * DOC: Flow Control
53 *
54 * The DRM GPU scheduler provides a flow control mechanism to regulate the rate
55 * at which the jobs fetched from scheduler entities are executed.
56 *
57 * In this context the &drm_gpu_scheduler keeps track of a driver specified
58 * credit limit representing the capacity of this scheduler and a credit count;
59 * every &drm_sched_job carries a driver specified number of credits.
60 *
61 * Once a job is executed (but not yet finished), the job's credits contribute
62 * to the scheduler's credit count until the job is finished. If by executing
63 * one more job the scheduler's credit count would exceed the scheduler's
64 * credit limit, the job won't be executed. Instead, the scheduler will wait
65 * until the credit count has decreased enough to not overflow its credit limit.
66 * This implies waiting for previously executed jobs.
67 */
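
/*
 * A hedged illustration of the credit mechanism described above: the driver
 * picks the credit limit at init time and each job declares its weight via
 * drm_sched_job_init(). The names FOO_RING_SLOTS and foo_sched_ops are
 * hypothetical.
 *
 *	// One credit per hardware ring slot in this example; the scheduler
 *	// stops fetching jobs once the in-flight credit count plus the next
 *	// job's credits would exceed .credit_limit.
 *	const struct drm_sched_init_args args = {
 *		.ops		= &foo_sched_ops,
 *		.num_rqs	= DRM_SCHED_PRIORITY_COUNT,
 *		.credit_limit	= FOO_RING_SLOTS,
 *		.timeout	= msecs_to_jiffies(500),
 *		.name		= "foo_ring",
 *		.dev		= dev,
 *	};
 *
 *	ret = drm_sched_init(&sched, &args);
 *	...
 *	ret = drm_sched_job_init(&job->base, entity, 1, owner, client_id);
 */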
68
69 #include <linux/export.h>
70 #include <linux/wait.h>
71 #include <linux/sched.h>
72 #include <linux/completion.h>
73 #include <linux/dma-resv.h>
74 #include <uapi/linux/sched/types.h>
75
76 #include <drm/drm_print.h>
77 #include <drm/drm_gem.h>
78 #include <drm/drm_syncobj.h>
79 #include <drm/gpu_scheduler.h>
80 #include <drm/spsc_queue.h>
81
82 #include "sched_internal.h"
83
84 #define CREATE_TRACE_POINTS
85 #include "gpu_scheduler_trace.h"
86
87 int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
88
89 /**
90 * DOC: sched_policy (int)
91 * Used to override the default scheduling policy for entities in a run queue.
92 */
93 MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
94 module_param_named(sched_policy, drm_sched_policy, int, 0444);
95
96 static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
97 {
98 u32 credits;
99
100 WARN_ON(check_sub_overflow(sched->credit_limit,
101 atomic_read(&sched->credit_count),
102 &credits));
103
104 return credits;
105 }
106
107 /**
108 * drm_sched_can_queue -- Can we queue more to the hardware?
109 * @sched: scheduler instance
110 * @entity: the scheduler entity
111 *
112 * Return true if we can push at least one more job from @entity, false
113 * otherwise.
114 */
115 static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
116 struct drm_sched_entity *entity)
117 {
118 struct drm_sched_job *s_job;
119
120 s_job = drm_sched_entity_queue_peek(entity);
121 if (!s_job)
122 return false;
123
124 /* If a job exceeds the credit limit, truncate it to the credit limit
125 * itself to guarantee forward progress.
126 */
127 if (s_job->credits > sched->credit_limit) {
128 dev_WARN(sched->dev,
129 "Jobs may not exceed the credit limit, truncate.\n");
130 s_job->credits = sched->credit_limit;
131 }
132
133 return drm_sched_available_credits(sched) >= s_job->credits;
134 }
135
136 static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
137 const struct rb_node *b)
138 {
139 struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);
140 struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);
141
142 return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
143 }
144
145 static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
146 struct drm_sched_rq *rq)
147 {
148 if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
149 rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
150 RB_CLEAR_NODE(&entity->rb_tree_node);
151 }
152 }
153
154 void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
155 struct drm_sched_rq *rq,
156 ktime_t ts)
157 {
158 /*
159 * Both locks need to be held: one protects against the entity's rq being
160 * changed concurrently from within drm_sched_entity_select_rq(), the
161 * other protects the rb tree structure while it is updated.
162 */
163 lockdep_assert_held(&entity->lock);
164 lockdep_assert_held(&rq->lock);
165
166 drm_sched_rq_remove_fifo_locked(entity, rq);
167
168 entity->oldest_job_waiting = ts;
169
170 rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root,
171 drm_sched_entity_compare_before);
172 }
173
174 /**
175 * drm_sched_rq_init - initialize a given run queue struct
176 *
177 * @sched: scheduler instance to associate with this run queue
178 * @rq: scheduler run queue
179 *
180 * Initializes a scheduler runqueue.
181 */
182 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
183 struct drm_sched_rq *rq)
184 {
185 spin_lock_init(&rq->lock);
186 INIT_LIST_HEAD(&rq->entities);
187 rq->rb_tree_root = RB_ROOT_CACHED;
188 rq->current_entity = NULL;
189 rq->sched = sched;
190 }
191
192 /**
193 * drm_sched_rq_add_entity - add an entity
194 *
195 * @rq: scheduler run queue
196 * @entity: scheduler entity
197 *
198 * Adds a scheduler entity to the run queue.
199 */
200 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
201 struct drm_sched_entity *entity)
202 {
203 lockdep_assert_held(&entity->lock);
204 lockdep_assert_held(&rq->lock);
205
206 if (!list_empty(&entity->list))
207 return;
208
209 atomic_inc(rq->sched->score);
210 list_add_tail(&entity->list, &rq->entities);
211 }
212
213 /**
214 * drm_sched_rq_remove_entity - remove an entity
215 *
216 * @rq: scheduler run queue
217 * @entity: scheduler entity
218 *
219 * Removes a scheduler entity from the run queue.
220 */
221 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
222 struct drm_sched_entity *entity)
223 {
224 lockdep_assert_held(&entity->lock);
225
226 if (list_empty(&entity->list))
227 return;
228
229 spin_lock(&rq->lock);
230
231 atomic_dec(rq->sched->score);
232 list_del_init(&entity->list);
233
234 if (rq->current_entity == entity)
235 rq->current_entity = NULL;
236
237 if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
238 drm_sched_rq_remove_fifo_locked(entity, rq);
239
240 spin_unlock(&rq->lock);
241 }
242
243 /**
244 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
245 *
246 * @sched: the gpu scheduler
247 * @rq: scheduler run queue to check.
248 *
249 * Try to find the next ready entity.
250 *
251 * Return an entity if one is found; return an error-pointer (!NULL) if an
252 * entity was ready, but the scheduler had insufficient credits to accommodate
253 * its job; return NULL, if no ready entity was found.
254 */
255 static struct drm_sched_entity *
256 drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
257 struct drm_sched_rq *rq)
258 {
259 struct drm_sched_entity *entity;
260
261 spin_lock(&rq->lock);
262
263 entity = rq->current_entity;
264 if (entity) {
265 list_for_each_entry_continue(entity, &rq->entities, list) {
266 if (drm_sched_entity_is_ready(entity))
267 goto found;
268 }
269 }
270
271 list_for_each_entry(entity, &rq->entities, list) {
272 if (drm_sched_entity_is_ready(entity))
273 goto found;
274
275 if (entity == rq->current_entity)
276 break;
277 }
278
279 spin_unlock(&rq->lock);
280
281 return NULL;
282
283 found:
284 if (!drm_sched_can_queue(sched, entity)) {
285 /*
286 * If the scheduler cannot take more jobs, signal the caller not
287 * to consider lower priority queues.
288 */
289 entity = ERR_PTR(-ENOSPC);
290 } else {
291 rq->current_entity = entity;
292 reinit_completion(&entity->entity_idle);
293 }
294
295 spin_unlock(&rq->lock);
296
297 return entity;
298 }
299
300 /**
301 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
302 *
303 * @sched: the gpu scheduler
304 * @rq: scheduler run queue to check.
305 *
306 * Find oldest waiting ready entity.
307 *
308 * Return an entity if one is found; return an error-pointer (!NULL) if an
309 * entity was ready, but the scheduler had insufficient credits to accommodate
310 * its job; return NULL, if no ready entity was found.
311 */
312 static struct drm_sched_entity *
313 drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched,
314 struct drm_sched_rq *rq)
315 {
316 struct rb_node *rb;
317
318 spin_lock(&rq->lock);
319 for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
320 struct drm_sched_entity *entity;
321
322 entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
323 if (drm_sched_entity_is_ready(entity)) {
324 /* If we can't queue yet, preserve the current entity in
325 * terms of fairness.
326 */
327 if (!drm_sched_can_queue(sched, entity)) {
328 spin_unlock(&rq->lock);
329 return ERR_PTR(-ENOSPC);
330 }
331
332 reinit_completion(&entity->entity_idle);
333 break;
334 }
335 }
336 spin_unlock(&rq->lock);
337
338 return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
339 }
340
341 /**
342 * drm_sched_run_job_queue - enqueue run-job work
343 * @sched: scheduler instance
344 */
345 static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
346 {
347 if (!READ_ONCE(sched->pause_submit))
348 queue_work(sched->submit_wq, &sched->work_run_job);
349 }
350
351 /**
352 * __drm_sched_run_free_queue - enqueue free-job work
353 * @sched: scheduler instance
354 */
355 static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
356 {
357 if (!READ_ONCE(sched->pause_submit))
358 queue_work(sched->submit_wq, &sched->work_free_job);
359 }
360
361 /**
362 * drm_sched_run_free_queue - enqueue free-job work if ready
363 * @sched: scheduler instance
364 */
365 static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
366 {
367 struct drm_sched_job *job;
368
369 job = list_first_entry_or_null(&sched->pending_list,
370 struct drm_sched_job, list);
371 if (job && dma_fence_is_signaled(&job->s_fence->finished))
372 __drm_sched_run_free_queue(sched);
373 }
374
375 static void drm_sched_run_free_queue_unlocked(struct drm_gpu_scheduler *sched)
376 {
377 spin_lock(&sched->job_list_lock);
378 drm_sched_run_free_queue(sched);
379 spin_unlock(&sched->job_list_lock);
380 }
381
382 /**
383 * drm_sched_job_done - complete a job
384 * @s_job: pointer to the job which is done
385 *
386 * Finish the job's fence and resubmit the work items.
387 */
388 static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
389 {
390 struct drm_sched_fence *s_fence = s_job->s_fence;
391 struct drm_gpu_scheduler *sched = s_fence->sched;
392
393 atomic_sub(s_job->credits, &sched->credit_count);
394 atomic_dec(sched->score);
395
396 trace_drm_sched_job_done(s_fence);
397
398 dma_fence_get(&s_fence->finished);
399 drm_sched_fence_finished(s_fence, result);
400 dma_fence_put(&s_fence->finished);
401 __drm_sched_run_free_queue(sched);
402 }
403
404 /**
405 * drm_sched_job_done_cb - the callback for a done job
406 * @f: fence
407 * @cb: fence callbacks
408 */
409 static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
410 {
411 struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
412
413 drm_sched_job_done(s_job, f->error);
414 }
415
416 /**
417 * drm_sched_start_timeout - start timeout for reset worker
418 *
419 * @sched: scheduler instance to start the worker for
420 *
421 * Start the timeout for the given scheduler.
422 */
423 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
424 {
425 lockdep_assert_held(&sched->job_list_lock);
426
427 if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
428 !list_empty(&sched->pending_list))
429 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
430 }
431
432 static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched)
433 {
434 spin_lock(&sched->job_list_lock);
435 drm_sched_start_timeout(sched);
436 spin_unlock(&sched->job_list_lock);
437 }
438
439 /**
440 * drm_sched_tdr_queue_imm - immediately start job timeout handler
441 *
442 * @sched: scheduler for which the timeout handling should be started.
443 *
444 * Start timeout handling immediately for the named scheduler.
445 */
446 void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched)
447 {
448 spin_lock(&sched->job_list_lock);
449 sched->timeout = 0;
450 drm_sched_start_timeout(sched);
451 spin_unlock(&sched->job_list_lock);
452 }
453 EXPORT_SYMBOL(drm_sched_tdr_queue_imm);
454
455 /**
456 * drm_sched_fault - immediately start timeout handler
457 *
458 * @sched: scheduler where the timeout handling should be started.
459 *
460 * Start timeout handling immediately when the driver detects a hardware fault.
461 */
462 void drm_sched_fault(struct drm_gpu_scheduler *sched)
463 {
464 if (sched->timeout_wq)
465 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
466 }
467 EXPORT_SYMBOL(drm_sched_fault);
468
469 /**
470 * drm_sched_suspend_timeout - Suspend scheduler job timeout
471 *
472 * @sched: scheduler instance for which to suspend the timeout
473 *
474 * Suspend the delayed work timeout for the scheduler. This is done by
475 * modifying the delayed work timeout to an arbitrarily large value,
476 * MAX_SCHEDULE_TIMEOUT in this case.
477 *
478 * Returns the remaining timeout.
479 *
480 */
481 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
482 {
483 unsigned long sched_timeout, now = jiffies;
484
485 sched_timeout = sched->work_tdr.timer.expires;
486
487 /*
488 * Modify the timeout to an arbitrarily large value. This also prevents
489 * the timeout from being restarted when new submissions arrive.
490 */
491 if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
492 && time_after(sched_timeout, now))
493 return sched_timeout - now;
494 else
495 return sched->timeout;
496 }
497 EXPORT_SYMBOL(drm_sched_suspend_timeout);
498
499 /**
500 * drm_sched_resume_timeout - Resume scheduler job timeout
501 *
502 * @sched: scheduler instance for which to resume the timeout
503 * @remaining: remaining timeout
504 *
505 * Resume the delayed work timeout for the scheduler.
506 */
507 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
508 unsigned long remaining)
509 {
510 spin_lock(&sched->job_list_lock);
511
512 if (list_empty(&sched->pending_list))
513 cancel_delayed_work(&sched->work_tdr);
514 else
515 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);
516
517 spin_unlock(&sched->job_list_lock);
518 }
519 EXPORT_SYMBOL(drm_sched_resume_timeout);
520
521 static void drm_sched_job_begin(struct drm_sched_job *s_job)
522 {
523 struct drm_gpu_scheduler *sched = s_job->sched;
524
525 spin_lock(&sched->job_list_lock);
526 list_add_tail(&s_job->list, &sched->pending_list);
527 drm_sched_start_timeout(sched);
528 spin_unlock(&sched->job_list_lock);
529 }
530
531 /**
532 * drm_sched_job_reinsert_on_false_timeout - reinsert the job on a false timeout
533 * @sched: scheduler instance
534 * @job: job to be reinserted on the pending list
535 *
536 * In the case of a "false timeout" - when a timeout occurs but the GPU isn't
537 * hung and is making progress, the scheduler must reinsert the job back into
538 * @sched->pending_list. Otherwise, the job and its resources won't be freed
539 * through the &struct drm_sched_backend_ops.free_job callback.
540 *
541 * This function must be used in "false timeout" cases only.
542 */
543 static void drm_sched_job_reinsert_on_false_timeout(struct drm_gpu_scheduler *sched,
544 struct drm_sched_job *job)
545 {
546 spin_lock(&sched->job_list_lock);
547 list_add(&job->list, &sched->pending_list);
548
549 /* After reinserting the job, the scheduler enqueues the free-job work
550 * again if ready. Otherwise, a signaled job could be added to the
551 * pending list, but never freed.
552 */
553 drm_sched_run_free_queue(sched);
554 spin_unlock(&sched->job_list_lock);
555 }
556
557 static void drm_sched_job_timedout(struct work_struct *work)
558 {
559 struct drm_gpu_scheduler *sched;
560 struct drm_sched_job *job;
561 enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_RESET;
562
563 sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
564
565 /* Protects against concurrent deletion in drm_sched_get_finished_job */
566 spin_lock(&sched->job_list_lock);
567 job = list_first_entry_or_null(&sched->pending_list,
568 struct drm_sched_job, list);
569
570 if (job) {
571 /*
572 * Remove the bad job so it cannot be freed by a concurrent
573 * &struct drm_sched_backend_ops.free_job. It will be
574 * reinserted after the scheduler's work items have been
575 * cancelled, at which point it's safe.
576 */
577 list_del_init(&job->list);
578 spin_unlock(&sched->job_list_lock);
579
580 status = job->sched->ops->timedout_job(job);
581
582 /*
583 * The guilty job completed, so it needs to be freed manually.
584 * See the drm_sched_stop() documentation.
585 */
586 if (sched->free_guilty) {
587 job->sched->ops->free_job(job);
588 sched->free_guilty = false;
589 }
590
591 if (status == DRM_GPU_SCHED_STAT_NO_HANG)
592 drm_sched_job_reinsert_on_false_timeout(sched, job);
593 } else {
594 spin_unlock(&sched->job_list_lock);
595 }
596
597 if (status != DRM_GPU_SCHED_STAT_ENODEV)
598 drm_sched_start_timeout_unlocked(sched);
599 }
600
601 /**
602 * drm_sched_stop - stop the scheduler
603 *
604 * @sched: scheduler instance
605 * @bad: job which caused the time out
606 *
607 * Stop the scheduler, and also remove and free all completed jobs.
608 * Note: the bad job will not be freed, as it might be used later, so it is
609 * the caller's responsibility to release it manually if it is not part of
610 * the pending list any more.
611 *
612 * This function is typically used for reset recovery (see the docu of
613 * drm_sched_backend_ops.timedout_job() for details). Do not call it for
614 * scheduler teardown, i.e., before calling drm_sched_fini().
615 *
616 * As it's only used for reset recovery, drivers must not call this function
617 * in their &struct drm_sched_backend_ops.timedout_job callback when they
618 * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG.
619 */
620 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
621 {
622 struct drm_sched_job *s_job, *tmp;
623
624 drm_sched_wqueue_stop(sched);
625
626 /*
627 * Reinsert back the bad job here - now it's safe as
628 * drm_sched_get_finished_job() cannot race against us and release the
629 * bad job at this point - we parked (waited for) any in progress
630 * (earlier) cleanups and drm_sched_get_finished_job() will not be
631 * called now until the scheduler's work items are submitted again.
632 */
633 if (bad && bad->sched == sched)
634 /*
635 * Add at the head of the queue to reflect it was the earliest
636 * job extracted.
637 */
638 list_add(&bad->list, &sched->pending_list);
639
640 /*
641 * Iterate the job list from the later to the earlier jobs and either
642 * deactivate their HW callbacks or remove them from the pending list if
643 * they have already signaled.
644 * This iteration is thread safe as the scheduler's work items have been
645 * cancelled.
646 */
647 list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
648 list) {
649 if (s_job->s_fence->parent &&
650 dma_fence_remove_callback(s_job->s_fence->parent,
651 &s_job->cb)) {
652 dma_fence_put(s_job->s_fence->parent);
653 s_job->s_fence->parent = NULL;
654 atomic_sub(s_job->credits, &sched->credit_count);
655 } else {
656 /*
657 * remove job from pending_list.
658 * Locking here is for concurrent resume timeout
659 */
660 spin_lock(&sched->job_list_lock);
661 list_del_init(&s_job->list);
662 spin_unlock(&sched->job_list_lock);
663
664 /*
665 * Wait for job's HW fence callback to finish using s_job
666 * before releasing it.
667 *
668 * Job is still alive so fence refcount at least 1
669 */
670 dma_fence_wait(&s_job->s_fence->finished, false);
671
672 /*
673 * We must keep bad job alive for later use during
674 * recovery by some of the drivers but leave a hint
675 * that the guilty job must be released.
676 */
677 if (bad != s_job)
678 sched->ops->free_job(s_job);
679 else
680 sched->free_guilty = true;
681 }
682 }
683
684 /*
685 * Stop the pending timer in flight, as we rearm it in drm_sched_start().
686 * This avoids pending timeout work in progress firing right away after
687 * this TDR finished and before the newly restarted jobs have had a
688 * chance to complete.
689 */
690 cancel_delayed_work(&sched->work_tdr);
691 }
692 EXPORT_SYMBOL(drm_sched_stop);
693
694 /**
695 * drm_sched_start - recover jobs after a reset
696 *
697 * @sched: scheduler instance
698 * @errno: error to set on the pending fences
699 *
700 * This function is typically used for reset recovery (see the docu of
701 * drm_sched_backend_ops.timedout_job() for details). Do not call it for
702 * scheduler startup. The scheduler itself is fully operational after
703 * drm_sched_init() succeeded.
704 *
705 * As it's only used for reset recovery, drivers must not call this function
706 * in their &struct drm_sched_backend_ops.timedout_job callback when they
707 * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG.
708 */
709 void drm_sched_start(struct drm_gpu_scheduler *sched, int errno)
710 {
711 struct drm_sched_job *s_job, *tmp;
712
713 /*
714 * Locking the list is not required here as the scheduler's work items
715 * are currently not running, so no new jobs are being inserted or
716 * removed. Also, concurrent GPU recoveries can't run in parallel.
717 */
718 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
719 struct dma_fence *fence = s_job->s_fence->parent;
720
721 atomic_add(s_job->credits, &sched->credit_count);
722
723 if (!fence) {
724 drm_sched_job_done(s_job, errno ?: -ECANCELED);
725 continue;
726 }
727
728 if (dma_fence_add_callback(fence, &s_job->cb,
729 drm_sched_job_done_cb))
730 drm_sched_job_done(s_job, fence->error ?: errno);
731 }
732
733 drm_sched_start_timeout_unlocked(sched);
734 drm_sched_wqueue_start(sched);
735 }
736 EXPORT_SYMBOL(drm_sched_start);
737
738 /**
739 * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
740 *
741 * @sched: scheduler instance
742 *
743 * Re-submitting jobs was a concept AMD came up with as a cheap way to implement
744 * recovery after a job timeout.
745 *
746 * This turned out to not work very well. First of all, there are many
747 * problems with the dma_fence implementation and requirements. Either the
748 * implementation risks deadlocks with core memory management or violates
749 * documented implementation details of the dma_fence object.
750 *
751 * Drivers can still save and restore their state for recovery operations, but
752 * we shouldn't make this a general scheduler feature around the dma_fence
753 * interface.
754 */
755 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
756 {
757 struct drm_sched_job *s_job, *tmp;
758 uint64_t guilty_context;
759 bool found_guilty = false;
760 struct dma_fence *fence;
761
762 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
763 struct drm_sched_fence *s_fence = s_job->s_fence;
764
765 if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
766 found_guilty = true;
767 guilty_context = s_job->s_fence->scheduled.context;
768 }
769
770 if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
771 dma_fence_set_error(&s_fence->finished, -ECANCELED);
772
773 fence = sched->ops->run_job(s_job);
774
775 if (IS_ERR_OR_NULL(fence)) {
776 if (IS_ERR(fence))
777 dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
778
779 s_job->s_fence->parent = NULL;
780 } else {
781
782 s_job->s_fence->parent = dma_fence_get(fence);
783
784 /* Drop for the original kref_init */
785 dma_fence_put(fence);
786 }
787 }
788 }
789 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
790
791 /**
792 * drm_sched_job_init - init a scheduler job
793 * @job: scheduler job to init
794 * @entity: scheduler entity to use
795 * @credits: the number of credits this job contributes to the schedulers
796 * credit limit
797 * @owner: job owner for debugging
798 * @drm_client_id: &struct drm_file.client_id of the owner (used by trace
799 * events)
800 *
801 * Refer to drm_sched_entity_push_job() documentation
802 * for locking considerations.
803 *
804 * Drivers must make sure to call drm_sched_job_cleanup() if this function
805 * returns successfully, even when @job is aborted before drm_sched_job_arm() is called.
806 *
807 * Note that this function does not assign a valid value to each struct member
808 * of struct drm_sched_job. Take a look at that struct's documentation to see
809 * who sets which struct member with what lifetime.
810 *
811 * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
812 * has died, which can mean that there's no valid runqueue for an @entity.
813 * This function returns -ENOENT in this case (which probably should be -EIO as
814 * a more meaningful return value).
815 *
816 * Returns 0 for success, negative error code otherwise.
817 */
818 int drm_sched_job_init(struct drm_sched_job *job,
819 struct drm_sched_entity *entity,
820 u32 credits, void *owner,
821 uint64_t drm_client_id)
822 {
823 if (!entity->rq) {
824 /* This will most likely be followed by missing frames
825 * or worse--a blank screen--so leave a trail in the
826 * logs to make this easier to debug.
827 */
828 dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__);
829 return -ENOENT;
830 }
831
832 if (unlikely(!credits)) {
833 pr_err("*ERROR* %s: credits cannot be 0!\n", __func__);
834 return -EINVAL;
835 }
836
837 /*
838 * We don't know for sure how the user has allocated. Thus, zero the
839 * struct so that unallowed (i.e., too early) usage of pointers that
840 * this function does not set is guaranteed to lead to a NULL pointer
841 * exception instead of UB.
842 */
843 memset(job, 0, sizeof(*job));
844
845 job->entity = entity;
846 job->credits = credits;
847 job->s_fence = drm_sched_fence_alloc(entity, owner, drm_client_id);
848 if (!job->s_fence)
849 return -ENOMEM;
850
851 INIT_LIST_HEAD(&job->list);
852
853 xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
854
855 return 0;
856 }
857 EXPORT_SYMBOL(drm_sched_job_init);
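
/*
 * A sketch of the typical submission path built on top of drm_sched_job_init(),
 * for illustration only; locking and error handling are driver specific and
 * the foo_* / bo names are hypothetical.
 *
 *	ret = drm_sched_job_init(&job->base, entity, 1, owner, client_id);
 *	if (ret)
 *		return ret;
 *
 *	// Dependencies may be added any time between init and arm; the resv
 *	// lock must be held for drm_sched_job_add_resv_dependencies().
 *	ret = drm_sched_job_add_resv_dependencies(&job->base, bo->resv,
 *						  DMA_RESV_USAGE_WRITE);
 *	if (ret)
 *		goto err_cleanup;	// aborted before arm, cleanup is allowed
 *
 *	drm_sched_job_arm(&job->base);
 *	drm_sched_entity_push_job(&job->base);	// point of no return
 *	return 0;
 *
 * err_cleanup:
 *	drm_sched_job_cleanup(&job->base);
 *	return ret;
 */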
858
859 /**
860 * drm_sched_job_arm - arm a scheduler job for execution
861 * @job: scheduler job to arm
862 *
863 * This arms a scheduler job for execution. Specifically it initializes the
864 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
865 * or other places that need to track the completion of this job. It also
866 * initializes sequence numbers, which are fundamental for fence ordering.
867 *
868 * Refer to drm_sched_entity_push_job() documentation for locking
869 * considerations.
870 *
871 * Once this function has been called, you *must* submit @job with
872 * drm_sched_entity_push_job().
873 *
874 * This can only be called if drm_sched_job_init() succeeded.
875 */
876 void drm_sched_job_arm(struct drm_sched_job *job)
877 {
878 struct drm_gpu_scheduler *sched;
879 struct drm_sched_entity *entity = job->entity;
880
881 BUG_ON(!entity);
882 drm_sched_entity_select_rq(entity);
883 sched = entity->rq->sched;
884
885 job->sched = sched;
886 job->s_priority = entity->priority;
887
888 drm_sched_fence_init(job->s_fence, job->entity);
889 }
890 EXPORT_SYMBOL(drm_sched_job_arm);
891
892 /**
893 * drm_sched_job_add_dependency - adds the fence as a job dependency
894 * @job: scheduler job to add the dependencies to
895 * @fence: the dma_fence to add to the list of dependencies.
896 *
897 * Note that @fence is consumed in both the success and error cases.
898 *
899 * Returns:
900 * 0 on success, or an error on failing to expand the array.
901 */
902 int drm_sched_job_add_dependency(struct drm_sched_job *job,
903 struct dma_fence *fence)
904 {
905 struct dma_fence *entry;
906 unsigned long index;
907 u32 id = 0;
908 int ret;
909
910 if (!fence)
911 return 0;
912
913 /* Deduplicate if we already depend on a fence from the same context.
914 * This lets the size of the array of deps scale with the number of
915 * engines involved, rather than the number of BOs.
916 */
917 xa_for_each(&job->dependencies, index, entry) {
918 if (entry->context != fence->context)
919 continue;
920
921 if (dma_fence_is_later(fence, entry)) {
922 dma_fence_put(entry);
923 xa_store(&job->dependencies, index, fence, GFP_KERNEL);
924 } else {
925 dma_fence_put(fence);
926 }
927 return 0;
928 }
929
930 ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
931 if (ret != 0)
932 dma_fence_put(fence);
933
934 return ret;
935 }
936 EXPORT_SYMBOL(drm_sched_job_add_dependency);
937
938 /**
939 * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
940 * @job: scheduler job to add the dependencies to
941 * @file: drm file private pointer
942 * @handle: syncobj handle to lookup
943 * @point: timeline point
944 *
945 * This adds the fence matching the given syncobj to @job.
946 *
947 * Returns:
948 * 0 on success, or an error on failing to expand the array.
949 */
950 int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
951 struct drm_file *file,
952 u32 handle,
953 u32 point)
954 {
955 struct dma_fence *fence;
956 int ret;
957
958 ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
959 if (ret)
960 return ret;
961
962 return drm_sched_job_add_dependency(job, fence);
963 }
964 EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);
965
966 /**
967 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
968 * @job: scheduler job to add the dependencies to
969 * @resv: the dma_resv object to get the fences from
970 * @usage: the dma_resv_usage to use to filter the fences
971 *
972 * This adds all fences matching the given usage from @resv to @job.
973 * Must be called with the @resv lock held.
974 *
975 * Returns:
976 * 0 on success, or an error on failing to expand the array.
977 */
978 int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
979 struct dma_resv *resv,
980 enum dma_resv_usage usage)
981 {
982 struct dma_resv_iter cursor;
983 struct dma_fence *fence;
984 int ret;
985
986 dma_resv_assert_held(resv);
987
988 dma_resv_for_each_fence(&cursor, resv, usage, fence) {
989 /* Make sure to grab an additional ref on the added fence */
990 dma_fence_get(fence);
991 ret = drm_sched_job_add_dependency(job, fence);
992 if (ret) {
993 dma_fence_put(fence);
994 return ret;
995 }
996 }
997 return 0;
998 }
999 EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);
1000
1001 /**
1002 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
1003 * dependencies
1004 * @job: scheduler job to add the dependencies to
1005 * @obj: the gem object to add new dependencies from.
1006 * @write: whether the job might write the object (so we need to depend on
1007 * shared fences in the reservation object).
1008 *
1009 * This should be called after drm_gem_lock_reservations() on your array of
1010 * GEM objects used in the job but before updating the reservations with your
1011 * own fences.
1012 *
1013 * Returns:
1014 * 0 on success, or an error on failing to expand the array.
1015 */
1016 int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
1017 struct drm_gem_object *obj,
1018 bool write)
1019 {
1020 return drm_sched_job_add_resv_dependencies(job, obj->resv,
1021 dma_resv_usage_rw(write));
1022 }
1023 EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
1024
1025 /**
1026 * drm_sched_job_has_dependency - check whether fence is the job's dependency
1027 * @job: scheduler job to check
1028 * @fence: fence to look for
1029 *
1030 * Returns:
1031 * True if @fence is found within the job's dependencies, or otherwise false.
1032 */
1033 bool drm_sched_job_has_dependency(struct drm_sched_job *job,
1034 struct dma_fence *fence)
1035 {
1036 struct dma_fence *f;
1037 unsigned long index;
1038
1039 xa_for_each(&job->dependencies, index, f) {
1040 if (f == fence)
1041 return true;
1042 }
1043
1044 return false;
1045 }
1046 EXPORT_SYMBOL(drm_sched_job_has_dependency);
1047
1048 /**
1049 * drm_sched_job_cleanup - clean up scheduler job resources
1050 * @job: scheduler job to clean up
1051 *
1052 * Cleans up the resources allocated with drm_sched_job_init().
1053 *
1054 * Drivers should call this from their error unwind code if @job is aborted
1055 * before drm_sched_job_arm() is called.
1056 *
1057 * drm_sched_job_arm() is a point of no return since it initializes the fences
1058 * and their sequence number etc. Once that function has been called, you *must*
1059 * submit it with drm_sched_entity_push_job() and cannot simply abort it by
1060 * calling drm_sched_job_cleanup().
1061 *
1062 * This function should be called in the &drm_sched_backend_ops.free_job callback.
1063 */
1064 void drm_sched_job_cleanup(struct drm_sched_job *job)
1065 {
1066 struct dma_fence *fence;
1067 unsigned long index;
1068
1069 if (kref_read(&job->s_fence->finished.refcount)) {
1070 /* The job has been processed by the scheduler, i.e.,
1071 * drm_sched_job_arm() and drm_sched_entity_push_job() have
1072 * been called.
1073 */
1074 dma_fence_put(&job->s_fence->finished);
1075 } else {
1076 /* The job was aborted before it has been committed to be run;
1077 * notably, drm_sched_job_arm() has not been called.
1078 */
1079 drm_sched_fence_free(job->s_fence);
1080 }
1081
1082 job->s_fence = NULL;
1083
1084 xa_for_each(&job->dependencies, index, fence) {
1085 dma_fence_put(fence);
1086 }
1087 xa_destroy(&job->dependencies);
1088
1089 }
1090 EXPORT_SYMBOL(drm_sched_job_cleanup);
1091
1092 /**
1093 * drm_sched_wakeup - Wake up the scheduler if it is ready to queue
1094 * @sched: scheduler instance
1095 *
1096 * Wake up the scheduler if we can queue jobs.
1097 */
1098 void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
1099 {
1100 drm_sched_run_job_queue(sched);
1101 }
1102
1103 /**
1104 * drm_sched_select_entity - Select next entity to process
1105 *
1106 * @sched: scheduler instance
1107 *
1108 * Return an entity to process or NULL if none are found.
1109 *
1110 * Note that we break out of the for-loop when "entity" is non-NULL, which can
1111 * also be an error pointer--this assures we don't process lower priority
1112 * run-queues. See the comments in the respective called functions.
1113 */
1114 static struct drm_sched_entity *
1115 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
1116 {
1117 struct drm_sched_entity *entity;
1118 int i;
1119
1120 /* Start with the highest priority.
1121 */
1122 for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1123 entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
1124 drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
1125 drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
1126 if (entity)
1127 break;
1128 }
1129
1130 return IS_ERR(entity) ? NULL : entity;
1131 }
1132
1133 /**
1134 * drm_sched_get_finished_job - fetch the next finished job to be destroyed
1135 *
1136 * @sched: scheduler instance
1137 *
1138 * Returns the next finished job from the pending list (if there is one)
1139 * ready for it to be destroyed.
1140 */
1141 static struct drm_sched_job *
1142 drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
1143 {
1144 struct drm_sched_job *job, *next;
1145
1146 spin_lock(&sched->job_list_lock);
1147
1148 job = list_first_entry_or_null(&sched->pending_list,
1149 struct drm_sched_job, list);
1150
1151 if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
1152 /* remove job from pending_list */
1153 list_del_init(&job->list);
1154
1155 /* cancel this job's TO timer */
1156 cancel_delayed_work(&sched->work_tdr);
1157 /* make the scheduled timestamp more accurate */
1158 next = list_first_entry_or_null(&sched->pending_list,
1159 typeof(*next), list);
1160
1161 if (next) {
1162 if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
1163 &next->s_fence->scheduled.flags))
1164 next->s_fence->scheduled.timestamp =
1165 dma_fence_timestamp(&job->s_fence->finished);
1166 /* start TO timer for next job */
1167 drm_sched_start_timeout(sched);
1168 }
1169 } else {
1170 job = NULL;
1171 }
1172
1173 spin_unlock(&sched->job_list_lock);
1174
1175 return job;
1176 }
1177
1178 /**
1179 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
1180 * @sched_list: list of drm_gpu_schedulers
1181 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
1182 *
1183 * Returns a pointer to the sched with the least load, or NULL if none of the
1184 * drm_gpu_schedulers are ready.
1185 */
1186 struct drm_gpu_scheduler *
1187 drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
1188 unsigned int num_sched_list)
1189 {
1190 struct drm_gpu_scheduler *sched, *picked_sched = NULL;
1191 int i;
1192 unsigned int min_score = UINT_MAX, num_score;
1193
1194 for (i = 0; i < num_sched_list; ++i) {
1195 sched = sched_list[i];
1196
1197 if (!sched->ready) {
1198 DRM_WARN("scheduler %s is not ready, skipping",
1199 sched->name);
1200 continue;
1201 }
1202
1203 num_score = atomic_read(sched->score);
1204 if (num_score < min_score) {
1205 min_score = num_score;
1206 picked_sched = sched;
1207 }
1208 }
1209
1210 return picked_sched;
1211 }
1212 EXPORT_SYMBOL(drm_sched_pick_best);
1213
1214 /**
1215 * drm_sched_free_job_work - worker to call free_job
1216 *
1217 * @w: free job work
1218 */
1219 static void drm_sched_free_job_work(struct work_struct *w)
1220 {
1221 struct drm_gpu_scheduler *sched =
1222 container_of(w, struct drm_gpu_scheduler, work_free_job);
1223 struct drm_sched_job *job;
1224
1225 job = drm_sched_get_finished_job(sched);
1226 if (job)
1227 sched->ops->free_job(job);
1228
1229 drm_sched_run_free_queue_unlocked(sched);
1230 drm_sched_run_job_queue(sched);
1231 }
1232
1233 /**
1234 * drm_sched_run_job_work - worker to call run_job
1235 *
1236 * @w: run job work
1237 */
1238 static void drm_sched_run_job_work(struct work_struct *w)
1239 {
1240 struct drm_gpu_scheduler *sched =
1241 container_of(w, struct drm_gpu_scheduler, work_run_job);
1242 struct drm_sched_entity *entity;
1243 struct dma_fence *fence;
1244 struct drm_sched_fence *s_fence;
1245 struct drm_sched_job *sched_job;
1246 int r;
1247
1248 /* Find entity with a ready job */
1249 entity = drm_sched_select_entity(sched);
1250 if (!entity)
1251 return; /* No more work */
1252
1253 sched_job = drm_sched_entity_pop_job(entity);
1254 if (!sched_job) {
1255 complete_all(&entity->entity_idle);
1256 drm_sched_run_job_queue(sched);
1257 return;
1258 }
1259
1260 s_fence = sched_job->s_fence;
1261
1262 atomic_add(sched_job->credits, &sched->credit_count);
1263 drm_sched_job_begin(sched_job);
1264
1265 trace_drm_sched_job_run(sched_job, entity);
1266 /*
1267 * The run_job() callback must by definition return a fence whose
1268 * refcount has been incremented for the scheduler already.
1269 */
1270 fence = sched->ops->run_job(sched_job);
1271 complete_all(&entity->entity_idle);
1272 drm_sched_fence_scheduled(s_fence, fence);
1273
1274 if (!IS_ERR_OR_NULL(fence)) {
1275 r = dma_fence_add_callback(fence, &sched_job->cb,
1276 drm_sched_job_done_cb);
1277 if (r == -ENOENT)
1278 drm_sched_job_done(sched_job, fence->error);
1279 else if (r)
1280 DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
1281
1282 dma_fence_put(fence);
1283 } else {
1284 drm_sched_job_done(sched_job, IS_ERR(fence) ?
1285 PTR_ERR(fence) : 0);
1286 }
1287
1288 wake_up(&sched->job_scheduled);
1289 drm_sched_run_job_queue(sched);
1290 }
1291
1292 static struct workqueue_struct *drm_sched_alloc_wq(const char *name)
1293 {
1294 #if (IS_ENABLED(CONFIG_LOCKDEP))
1295 static struct lockdep_map map = {
1296 .name = "drm_sched_lockdep_map"
1297 };
1298
1299 /*
1300 * Avoid leaking a lockdep map on each drm sched creation and
1301 * destruction by using a single lockdep map for all drm sched
1302 * allocated submit_wq.
1303 */
1304
1305 return alloc_ordered_workqueue_lockdep_map(name, WQ_MEM_RECLAIM, &map);
1306 #else
1307 return alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
1308 #endif
1309 }
1310
1311 /**
1312 * drm_sched_init - Init a gpu scheduler instance
1313 *
1314 * @sched: scheduler instance
1315 * @args: scheduler initialization arguments
1316 *
1317 * Return 0 on success, otherwise error code.
1318 */
1319 int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_args *args)
1320 {
1321 int i;
1322
1323 sched->ops = args->ops;
1324 sched->credit_limit = args->credit_limit;
1325 sched->name = args->name;
1326 sched->timeout = args->timeout;
1327 sched->hang_limit = args->hang_limit;
1328 sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_wq;
1329 sched->score = args->score ? args->score : &sched->_score;
1330 sched->dev = args->dev;
1331
1332 if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) {
1333 /* This is a gross violation--tell drivers what the problem is.
1334 */
1335 dev_err(sched->dev, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
1336 __func__);
1337 return -EINVAL;
1338 } else if (sched->sched_rq) {
1339 /* Not an error, but warn anyway so drivers can
1340 * fine-tune their DRM calling order, and return all
1341 * is good.
1342 */
1343 dev_warn(sched->dev, "%s: scheduler already initialized!\n", __func__);
1344 return 0;
1345 }
1346
1347 if (args->submit_wq) {
1348 sched->submit_wq = args->submit_wq;
1349 sched->own_submit_wq = false;
1350 } else {
1351 sched->submit_wq = drm_sched_alloc_wq(args->name);
1352 if (!sched->submit_wq)
1353 return -ENOMEM;
1354
1355 sched->own_submit_wq = true;
1356 }
1357
1358 sched->sched_rq = kmalloc_array(args->num_rqs, sizeof(*sched->sched_rq),
1359 GFP_KERNEL | __GFP_ZERO);
1360 if (!sched->sched_rq)
1361 goto Out_check_own;
1362 sched->num_rqs = args->num_rqs;
1363 for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1364 sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
1365 if (!sched->sched_rq[i])
1366 goto Out_unroll;
1367 drm_sched_rq_init(sched, sched->sched_rq[i]);
1368 }
1369
1370 init_waitqueue_head(&sched->job_scheduled);
1371 INIT_LIST_HEAD(&sched->pending_list);
1372 spin_lock_init(&sched->job_list_lock);
1373 atomic_set(&sched->credit_count, 0);
1374 INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
1375 INIT_WORK(&sched->work_run_job, drm_sched_run_job_work);
1376 INIT_WORK(&sched->work_free_job, drm_sched_free_job_work);
1377 atomic_set(&sched->_score, 0);
1378 atomic64_set(&sched->job_id_count, 0);
1379 sched->pause_submit = false;
1380
1381 sched->ready = true;
1382 return 0;
1383 Out_unroll:
1384 for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
1385 kfree(sched->sched_rq[i]);
1386
1387 kfree(sched->sched_rq);
1388 sched->sched_rq = NULL;
1389 Out_check_own:
1390 if (sched->own_submit_wq)
1391 destroy_workqueue(sched->submit_wq);
1392 dev_err(sched->dev, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
1393 return -ENOMEM;
1394 }
1395 EXPORT_SYMBOL(drm_sched_init);
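
/*
 * For illustration, a hedged sketch of drm_sched_init()/drm_sched_fini()
 * pairing when a driver hands in its own ordered workqueue (shared by several
 * schedulers, as some firmware schedulers do). The foo_* names are
 * hypothetical.
 *
 *	foo->sched_wq = alloc_ordered_workqueue("foo-sched", WQ_MEM_RECLAIM);
 *	if (!foo->sched_wq)
 *		return -ENOMEM;
 *
 *	args.submit_wq = foo->sched_wq;	// sched->own_submit_wq stays false
 *	ret = drm_sched_init(&foo->sched, &args);
 *	if (ret)
 *		goto err_free_wq;
 *	...
 *	// Teardown: entities first, then the scheduler, then the driver's wq.
 *	drm_sched_fini(&foo->sched);
 *	destroy_workqueue(foo->sched_wq);
 */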
1396
1397 static void drm_sched_cancel_remaining_jobs(struct drm_gpu_scheduler *sched)
1398 {
1399 struct drm_sched_job *job, *tmp;
1400
1401 /* All other accessors are stopped. No locking necessary. */
1402 list_for_each_entry_safe_reverse(job, tmp, &sched->pending_list, list) {
1403 sched->ops->cancel_job(job);
1404 list_del(&job->list);
1405 sched->ops->free_job(job);
1406 }
1407 }
1408
1409 /**
1410 * drm_sched_fini - Destroy a gpu scheduler
1411 *
1412 * @sched: scheduler instance
1413 *
1414 * Tears down and cleans up the scheduler.
1415 *
1416 * This stops submission of new jobs to the hardware through &struct
1417 * drm_sched_backend_ops.run_job. If &struct drm_sched_backend_ops.cancel_job
1418 * is implemented, all jobs will be canceled through it and afterwards cleaned
1419 * up through &struct drm_sched_backend_ops.free_job. If cancel_job is not
1420 * implemented, memory could leak.
1421 */
1422 void drm_sched_fini(struct drm_gpu_scheduler *sched)
1423 {
1424 struct drm_sched_entity *s_entity;
1425 int i;
1426
1427 drm_sched_wqueue_stop(sched);
1428
1429 for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1430 struct drm_sched_rq *rq = sched->sched_rq[i];
1431
1432 spin_lock(&rq->lock);
1433 list_for_each_entry(s_entity, &rq->entities, list)
1434 /*
1435 * Prevents reinsertion and marks job_queue as idle;
1436 * it will be removed from the rq in drm_sched_entity_fini()
1437 * eventually.
1438 */
1439 s_entity->stopped = true;
1440 spin_unlock(&rq->lock);
1441 kfree(sched->sched_rq[i]);
1442 }
1443
1444 /* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
1445 wake_up_all(&sched->job_scheduled);
1446
1447 /* Confirm no work left behind accessing device structures */
1448 cancel_delayed_work_sync(&sched->work_tdr);
1449
1450 /* Avoid memory leaks if supported by the driver. */
1451 if (sched->ops->cancel_job)
1452 drm_sched_cancel_remaining_jobs(sched);
1453
1454 if (sched->own_submit_wq)
1455 destroy_workqueue(sched->submit_wq);
1456 sched->ready = false;
1457 kfree(sched->sched_rq);
1458 sched->sched_rq = NULL;
1459
1460 if (!list_empty(&sched->pending_list))
1461 dev_warn(sched->dev, "Tearing down scheduler while jobs are pending!\n");
1462 }
1463 EXPORT_SYMBOL(drm_sched_fini);
1464
1465 /**
1466 * drm_sched_increase_karma - Update sched_entity guilty flag
1467 *
1468 * @bad: The job guilty of time out
1469 *
1470 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
1471 * limit of the scheduler then the respective sched entity is marked guilty and
1472 * jobs from it will not be scheduled further
1473 */
1474 void drm_sched_increase_karma(struct drm_sched_job *bad)
1475 {
1476 int i;
1477 struct drm_sched_entity *tmp;
1478 struct drm_sched_entity *entity;
1479 struct drm_gpu_scheduler *sched = bad->sched;
1480
1481 /* Don't change @bad's karma if it's from the KERNEL RQ,
1482 * because sometimes a GPU hang would cause kernel jobs (like VM updating jobs)
1483 * to be corrupted, but keep in mind that kernel jobs are always considered good.
1484 */
1485 if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
1486 atomic_inc(&bad->karma);
1487
1488 for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
1489 struct drm_sched_rq *rq = sched->sched_rq[i];
1490
1491 spin_lock(&rq->lock);
1492 list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
1493 if (bad->s_fence->scheduled.context ==
1494 entity->fence_context) {
1495 if (entity->guilty)
1496 atomic_set(entity->guilty, 1);
1497 break;
1498 }
1499 }
1500 spin_unlock(&rq->lock);
1501 if (&entity->list != &rq->entities)
1502 break;
1503 }
1504 }
1505 }
1506 EXPORT_SYMBOL(drm_sched_increase_karma);
1507
1508 /**
1509 * drm_sched_wqueue_ready - Is the scheduler ready for submission
1510 *
1511 * @sched: scheduler instance
1512 *
1513 * Returns true if submission is ready
1514 */
1515 bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched)
1516 {
1517 return sched->ready;
1518 }
1519 EXPORT_SYMBOL(drm_sched_wqueue_ready);
1520
1521 /**
1522 * drm_sched_wqueue_stop - stop scheduler submission
1523 * @sched: scheduler instance
1524 *
1525 * Stops the scheduler from pulling new jobs from entities. It also stops
1526 * freeing jobs automatically through drm_sched_backend_ops.free_job().
1527 */
1528 void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
1529 {
1530 WRITE_ONCE(sched->pause_submit, true);
1531 cancel_work_sync(&sched->work_run_job);
1532 cancel_work_sync(&sched->work_free_job);
1533 }
1534 EXPORT_SYMBOL(drm_sched_wqueue_stop);
1535
1536 /**
1537 * drm_sched_wqueue_start - start scheduler submission
1538 * @sched: scheduler instance
1539 *
1540 * Restarts the scheduler after drm_sched_wqueue_stop() has stopped it.
1541 *
1542 * This function is not necessary for 'conventional' startup. The scheduler is
1543 * fully operational after drm_sched_init() succeeded.
1544 */
1545 void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
1546 {
1547 WRITE_ONCE(sched->pause_submit, false);
1548 queue_work(sched->submit_wq, &sched->work_run_job);
1549 queue_work(sched->submit_wq, &sched->work_free_job);
1550 }
1551 EXPORT_SYMBOL(drm_sched_wqueue_start);
1552