1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 /**
25 * DOC: Overview
26 *
27 * The GPU scheduler provides entities which allow userspace to push jobs
28 * into software queues which are then scheduled on a hardware run queue.
29 * The software queues have a priority among them. The scheduler selects the entities
30 * from the run queue using a FIFO. The scheduler provides dependency handling
31 * features among jobs. The driver is supposed to provide callback functions for
32 * backend operations to the scheduler, like submitting a job to the hardware run
33 * queue, returning the dependencies of a job, etc.
34 *
35 * The organisation of the scheduler is the following:
36 *
37 * 1. Each hw run queue has one scheduler
38 * 2. Each scheduler has multiple run queues with different priorities
39 * (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
40 * 3. Each scheduler run queue has a queue of entities to schedule
41 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
42 * the hardware.
43 *
44 * The jobs in an entity are always scheduled in the order in which they were pushed.
45 *
46 * Note that once a job has been taken from the entity's queue and pushed to the
47 * hardware, i.e. the pending queue, the entity must not be referenced anymore
48 * through the job's entity pointer.
49 */
50
51 /**
52 * DOC: Flow Control
53 *
54 * The DRM GPU scheduler provides a flow control mechanism to regulate the rate
55 * at which the jobs fetched from scheduler entities are executed.
56 *
57 * In this context the &drm_gpu_scheduler keeps track of a driver specified
58 * credit limit representing the capacity of this scheduler and a credit count;
59 * every &drm_sched_job carries a driver specified number of credits.
60 *
61 * Once a job is executed (but not yet finished), the job's credits contribute
62 * to the scheduler's credit count until the job is finished. If by executing
63 * one more job the scheduler's credit count would exceed the scheduler's
64 * credit limit, the job won't be executed. Instead, the scheduler will wait
65 * until the credit count has decreased enough to not overflow its credit limit.
66 * This implies waiting for previously executed jobs.
67 */
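/*
 * For illustration only: a minimal sketch of how a driver might wire up the
 * credit-based flow control described above. The scheduler capacity is set
 * through &struct drm_sched_init_args.credit_limit and each job declares its
 * cost through the @credits argument of drm_sched_job_init(). The helpers
 * my_ring_size() and my_job_size() are hypothetical driver functions, not part
 * of this API.
 *
 *	struct drm_sched_init_args args = {
 *		.ops = &my_sched_ops,
 *		.credit_limit = my_ring_size(ring),	// capacity of this scheduler
 *		...
 *	};
 *
 *	drm_sched_init(&ring->sched, &args);
 *	...
 *	// each job contributes my_job_size() credits while it is on the hardware
 *	ret = drm_sched_job_init(&job->base, entity, my_job_size(job), owner,
 *				 client_id);
 */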
68
69 #include <linux/export.h>
70 #include <linux/wait.h>
71 #include <linux/sched.h>
72 #include <linux/completion.h>
73 #include <linux/dma-resv.h>
74 #include <uapi/linux/sched/types.h>
75
76 #include <drm/drm_print.h>
77 #include <drm/drm_gem.h>
78 #include <drm/drm_syncobj.h>
79 #include <drm/gpu_scheduler.h>
80 #include <drm/spsc_queue.h>
81
82 #include "sched_internal.h"
83
84 #define CREATE_TRACE_POINTS
85 #include "gpu_scheduler_trace.h"
86
87 int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
88
89 /**
90 * DOC: sched_policy (int)
91 * Used to override the default scheduling policy for entities in a run queue.
92 */
93 MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
94 module_param_named(sched_policy, drm_sched_policy, int, 0444);
95
96 static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
97 {
98 u32 credits;
99
100 WARN_ON(check_sub_overflow(sched->credit_limit,
101 atomic_read(&sched->credit_count),
102 &credits));
103
104 return credits;
105 }
106
107 /**
108 * drm_sched_can_queue - Can we queue more to the hardware?
109 * @sched: scheduler instance
110 * @entity: the scheduler entity
111 *
112 * Return true if we can push at least one more job from @entity, false
113 * otherwise.
114 */
115 static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
116 struct drm_sched_entity *entity)
117 {
118 struct drm_sched_job *s_job;
119
120 s_job = drm_sched_entity_queue_peek(entity);
121 if (!s_job)
122 return false;
123
124 /* If a job exceeds the credit limit, truncate it to the credit limit
125 * itself to guarantee forward progress.
126 */
127 if (s_job->credits > sched->credit_limit) {
128 dev_WARN(sched->dev,
129 "Jobs may not exceed the credit limit, truncate.\n");
130 s_job->credits = sched->credit_limit;
131 }
132
133 return drm_sched_available_credits(sched) >= s_job->credits;
134 }
135
136 static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
137 const struct rb_node *b)
138 {
139 struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);
140 struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);
141
142 return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
143 }
144
145 static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
146 struct drm_sched_rq *rq)
147 {
148 if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
149 rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
150 RB_CLEAR_NODE(&entity->rb_tree_node);
151 }
152 }
153
154 void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
155 struct drm_sched_rq *rq,
156 ktime_t ts)
157 {
158 /*
159 * Both locks need to be held: one to protect against a concurrent change of
160 * entity->rq from within drm_sched_entity_select_rq(), and the other to
161 * protect the rb tree structure while it is updated.
162 */
163 lockdep_assert_held(&entity->lock);
164 lockdep_assert_held(&rq->lock);
165
166 drm_sched_rq_remove_fifo_locked(entity, rq);
167
168 entity->oldest_job_waiting = ts;
169
170 rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root,
171 drm_sched_entity_compare_before);
172 }
173
174 /**
175 * drm_sched_rq_init - initialize a given run queue struct
176 *
177 * @sched: scheduler instance to associate with this run queue
178 * @rq: scheduler run queue
179 *
180 * Initializes a scheduler runqueue.
181 */
182 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
183 struct drm_sched_rq *rq)
184 {
185 spin_lock_init(&rq->lock);
186 INIT_LIST_HEAD(&rq->entities);
187 rq->rb_tree_root = RB_ROOT_CACHED;
188 rq->current_entity = NULL;
189 rq->sched = sched;
190 }
191
192 /**
193 * drm_sched_rq_add_entity - add an entity
194 *
195 * @rq: scheduler run queue
196 * @entity: scheduler entity
197 *
198 * Adds a scheduler entity to the run queue.
199 */
200 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
201 struct drm_sched_entity *entity)
202 {
203 lockdep_assert_held(&entity->lock);
204 lockdep_assert_held(&rq->lock);
205
206 if (!list_empty(&entity->list))
207 return;
208
209 atomic_inc(rq->sched->score);
210 list_add_tail(&entity->list, &rq->entities);
211 }
212
213 /**
214 * drm_sched_rq_remove_entity - remove an entity
215 *
216 * @rq: scheduler run queue
217 * @entity: scheduler entity
218 *
219 * Removes a scheduler entity from the run queue.
220 */
221 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
222 struct drm_sched_entity *entity)
223 {
224 lockdep_assert_held(&entity->lock);
225
226 if (list_empty(&entity->list))
227 return;
228
229 spin_lock(&rq->lock);
230
231 atomic_dec(rq->sched->score);
232 list_del_init(&entity->list);
233
234 if (rq->current_entity == entity)
235 rq->current_entity = NULL;
236
237 if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
238 drm_sched_rq_remove_fifo_locked(entity, rq);
239
240 spin_unlock(&rq->lock);
241 }
242
243 /**
244 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
245 *
246 * @sched: the gpu scheduler
247 * @rq: scheduler run queue to check.
248 *
249 * Try to find the next ready entity.
250 *
251 * Return an entity if one is found; return an error-pointer (!NULL) if an
252 * entity was ready, but the scheduler had insufficient credits to accommodate
253 * its job; return NULL, if no ready entity was found.
254 */
255 static struct drm_sched_entity *
256 drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
257 struct drm_sched_rq *rq)
258 {
259 struct drm_sched_entity *entity;
260
261 spin_lock(&rq->lock);
262
263 entity = rq->current_entity;
264 if (entity) {
265 list_for_each_entry_continue(entity, &rq->entities, list) {
266 if (drm_sched_entity_is_ready(entity))
267 goto found;
268 }
269 }
270
271 list_for_each_entry(entity, &rq->entities, list) {
272 if (drm_sched_entity_is_ready(entity))
273 goto found;
274
275 if (entity == rq->current_entity)
276 break;
277 }
278
279 spin_unlock(&rq->lock);
280
281 return NULL;
282
283 found:
284 if (!drm_sched_can_queue(sched, entity)) {
285 /*
286 * If the scheduler cannot take more jobs, signal the caller not
287 * to consider lower priority queues.
288 */
289 entity = ERR_PTR(-ENOSPC);
290 } else {
291 rq->current_entity = entity;
292 reinit_completion(&entity->entity_idle);
293 }
294
295 spin_unlock(&rq->lock);
296
297 return entity;
298 }
299
300 /**
301 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
302 *
303 * @sched: the gpu scheduler
304 * @rq: scheduler run queue to check.
305 *
306 * Find oldest waiting ready entity.
307 *
308 * Return an entity if one is found; return an error-pointer (!NULL) if an
309 * entity was ready, but the scheduler had insufficient credits to accommodate
310 * its job; return NULL, if no ready entity was found.
311 */
312 static struct drm_sched_entity *
313 drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched,
314 struct drm_sched_rq *rq)
315 {
316 struct rb_node *rb;
317
318 spin_lock(&rq->lock);
319 for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
320 struct drm_sched_entity *entity;
321
322 entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
323 if (drm_sched_entity_is_ready(entity)) {
324 /* If we can't queue yet, preserve the current entity in
325 * terms of fairness.
326 */
327 if (!drm_sched_can_queue(sched, entity)) {
328 spin_unlock(&rq->lock);
329 return ERR_PTR(-ENOSPC);
330 }
331
332 reinit_completion(&entity->entity_idle);
333 break;
334 }
335 }
336 spin_unlock(&rq->lock);
337
338 return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
339 }
340
341 /**
342 * drm_sched_run_job_queue - enqueue run-job work
343 * @sched: scheduler instance
344 */
345 static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
346 {
347 if (!drm_sched_is_stopped(sched))
348 queue_work(sched->submit_wq, &sched->work_run_job);
349 }
350
351 /**
352 * drm_sched_run_free_queue - enqueue free-job work
353 * @sched: scheduler instance
354 */
355 static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
356 {
357 if (!drm_sched_is_stopped(sched))
358 queue_work(sched->submit_wq, &sched->work_free_job);
359 }
360
361 /**
362 * drm_sched_job_done - complete a job
363 * @s_job: pointer to the job which is done
364 * @result: 0 on success, -ERRNO on error
365 *
366 * Finish the job's fence and resubmit the work items.
367 */
368 static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
369 {
370 struct drm_sched_fence *s_fence = s_job->s_fence;
371 struct drm_gpu_scheduler *sched = s_fence->sched;
372
373 atomic_sub(s_job->credits, &sched->credit_count);
374 atomic_dec(sched->score);
375
376 trace_drm_sched_job_done(s_fence);
377
378 dma_fence_get(&s_fence->finished);
379 drm_sched_fence_finished(s_fence, result);
380 dma_fence_put(&s_fence->finished);
381 drm_sched_run_free_queue(sched);
382 }
383
384 /**
385 * drm_sched_job_done_cb - the callback for a done job
386 * @f: fence
387 * @cb: fence callbacks
388 */
389 static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
390 {
391 struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
392
393 drm_sched_job_done(s_job, f->error);
394 }
395
396 /**
397 * drm_sched_start_timeout - start timeout for reset worker
398 *
399 * @sched: scheduler instance to start the worker for
400 *
401 * Start the timeout for the given scheduler.
402 */
403 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
404 {
405 lockdep_assert_held(&sched->job_list_lock);
406
407 if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
408 !list_empty(&sched->pending_list))
409 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
410 }
411
412 static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched)
413 {
414 spin_lock(&sched->job_list_lock);
415 drm_sched_start_timeout(sched);
416 spin_unlock(&sched->job_list_lock);
417 }
418
419 /**
420 * drm_sched_tdr_queue_imm - immediately start job timeout handler
421 *
422 * @sched: scheduler for which the timeout handling should be started.
423 *
424 * Start timeout handling immediately for the named scheduler.
425 */
426 void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched)
427 {
428 spin_lock(&sched->job_list_lock);
429 sched->timeout = 0;
430 drm_sched_start_timeout(sched);
431 spin_unlock(&sched->job_list_lock);
432 }
433 EXPORT_SYMBOL(drm_sched_tdr_queue_imm);
434
435 /**
436 * drm_sched_fault - immediately start timeout handler
437 *
438 * @sched: scheduler where the timeout handling should be started.
439 *
440 * Start timeout handling immediately when the driver detects a hardware fault.
441 */
442 void drm_sched_fault(struct drm_gpu_scheduler *sched)
443 {
444 if (sched->timeout_wq)
445 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
446 }
447 EXPORT_SYMBOL(drm_sched_fault);
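/*
 * Illustrative sketch (not taken from a real driver): drm_sched_fault() is
 * typically called from a driver's error interrupt handler once a hardware
 * fault has been detected, so that the timeout handler runs immediately
 * instead of waiting for the regular timeout to expire. The my_ring,
 * my_ring_has_faulted() and my_irq_handler names are hypothetical.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_ring *ring = data;
 *
 *		if (my_ring_has_faulted(ring))
 *			drm_sched_fault(&ring->sched);
 *
 *		return IRQ_HANDLED;
 *	}
 */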
448
449 /**
450 * drm_sched_suspend_timeout - Suspend scheduler job timeout
451 *
452 * @sched: scheduler instance for which to suspend the timeout
453 *
454 * Suspend the delayed work timeout for the scheduler. This is done by
455 * modifying the delayed work timeout to an arbitrarily large value,
456 * MAX_SCHEDULE_TIMEOUT in this case.
457 *
458 * Returns the timeout remaining
459 *
460 */
461 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
462 {
463 unsigned long sched_timeout, now = jiffies;
464
465 sched_timeout = sched->work_tdr.timer.expires;
466
467 /*
468 * Modify the timeout to an arbitrarily large value. This also prevents
469 * the timeout from being restarted when new submissions arrive.
470 */
471 if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
472 && time_after(sched_timeout, now))
473 return sched_timeout - now;
474 else
475 return sched->timeout;
476 }
477 EXPORT_SYMBOL(drm_sched_suspend_timeout);
478
479 /**
480 * drm_sched_resume_timeout - Resume scheduler job timeout
481 *
482 * @sched: scheduler instance for which to resume the timeout
483 * @remaining: remaining timeout
484 *
485 * Resume the delayed work timeout for the scheduler.
486 */
487 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
488 unsigned long remaining)
489 {
490 spin_lock(&sched->job_list_lock);
491
492 if (list_empty(&sched->pending_list))
493 cancel_delayed_work(&sched->work_tdr);
494 else
495 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);
496
497 spin_unlock(&sched->job_list_lock);
498 }
499 EXPORT_SYMBOL(drm_sched_resume_timeout);
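/*
 * Illustrative sketch: drm_sched_suspend_timeout() and
 * drm_sched_resume_timeout() are meant to be used as a pair, for example
 * around a driver-side operation during which the job timeout must not fire.
 * my_reconfigure_ring() is a hypothetical placeholder for such an operation.
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(&ring->sched);
 *	my_reconfigure_ring(ring);
 *	drm_sched_resume_timeout(&ring->sched, remaining);
 */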
500
501 static void drm_sched_job_begin(struct drm_sched_job *s_job)
502 {
503 struct drm_gpu_scheduler *sched = s_job->sched;
504
505 spin_lock(&sched->job_list_lock);
506 list_add_tail(&s_job->list, &sched->pending_list);
507 drm_sched_start_timeout(sched);
508 spin_unlock(&sched->job_list_lock);
509 }
510
511 /**
512 * drm_sched_job_reinsert_on_false_timeout - reinsert the job on a false timeout
513 * @sched: scheduler instance
514 * @job: job to be reinserted on the pending list
515 *
516 * In the case of a "false timeout" - when a timeout occurs but the GPU isn't
517 * hung and is making progress, the scheduler must reinsert the job back into
518 * @sched->pending_list. Otherwise, the job and its resources won't be freed
519 * through the &struct drm_sched_backend_ops.free_job callback.
520 *
521 * This function must be used in "false timeout" cases only.
522 */
523 static void drm_sched_job_reinsert_on_false_timeout(struct drm_gpu_scheduler *sched,
524 struct drm_sched_job *job)
525 {
526 spin_lock(&sched->job_list_lock);
527 list_add(&job->list, &sched->pending_list);
528
529 /* After reinserting the job, the scheduler enqueues the free-job work
530 * again if ready. Otherwise, a signaled job could be added to the
531 * pending list, but never freed.
532 */
533 drm_sched_run_free_queue(sched);
534 spin_unlock(&sched->job_list_lock);
535 }
536
537 static void drm_sched_job_timedout(struct work_struct *work)
538 {
539 struct drm_gpu_scheduler *sched;
540 struct drm_sched_job *job;
541 enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_RESET;
542
543 sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
544
545 /* Protects against concurrent deletion in drm_sched_get_finished_job */
546 spin_lock(&sched->job_list_lock);
547 job = list_first_entry_or_null(&sched->pending_list,
548 struct drm_sched_job, list);
549
550 if (job) {
551 /*
552 * Remove the bad job so it cannot be freed by a concurrent
553 * &struct drm_sched_backend_ops.free_job. It will be
554 * reinserted after the scheduler's work items have been
555 * cancelled, at which point it's safe.
556 */
557 list_del_init(&job->list);
558 spin_unlock(&sched->job_list_lock);
559
560 status = job->sched->ops->timedout_job(job);
561
562 /*
563 * The guilty job did complete and hence needs to be manually removed.
564 * See the drm_sched_stop() documentation.
565 */
566 if (sched->free_guilty) {
567 job->sched->ops->free_job(job);
568 sched->free_guilty = false;
569 }
570
571 if (status == DRM_GPU_SCHED_STAT_NO_HANG)
572 drm_sched_job_reinsert_on_false_timeout(sched, job);
573 } else {
574 spin_unlock(&sched->job_list_lock);
575 }
576
577 if (status != DRM_GPU_SCHED_STAT_ENODEV)
578 drm_sched_start_timeout_unlocked(sched);
579 }
580
581 /**
582 * drm_sched_stop - stop the scheduler
583 *
584 * @sched: scheduler instance
585 * @bad: job which caused the time out
586 *
587 * Stop the scheduler; this also removes and frees all completed jobs.
588 * Note: the bad job will not be freed, as it might be used later, and so it is
589 * the caller's responsibility to release it manually if it is not part of the
590 * pending list any more.
591 *
592 * This function is typically used for reset recovery (see the documentation of
593 * drm_sched_backend_ops.timedout_job() for details). Do not call it for
594 * scheduler teardown, i.e., before calling drm_sched_fini().
595 *
596 * As it's only used for reset recovery, drivers must not call this function
597 * in their &struct drm_sched_backend_ops.timedout_job callback when they
598 * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG.
599 */
600 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
601 {
602 struct drm_sched_job *s_job, *tmp;
603
604 drm_sched_wqueue_stop(sched);
605
606 /*
607 * Reinsert back the bad job here - now it's safe as
608 * drm_sched_get_finished_job() cannot race against us and release the
609 * bad job at this point - we parked (waited for) any in progress
610 * (earlier) cleanups and drm_sched_get_finished_job() will not be
611 * called now until the scheduler's work items are submitted again.
612 */
613 if (bad && bad->sched == sched)
614 /*
615 * Add at the head of the queue to reflect it was the earliest
616 * job extracted.
617 */
618 list_add(&bad->list, &sched->pending_list);
619
620 /*
621 * Iterate the job list from later to earlier and either deactivate the
622 * jobs' HW callbacks or remove them from the pending list if they already
623 * signaled.
624 * This iteration is thread safe as the scheduler's work items have been
625 * cancelled.
626 */
627 list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
628 list) {
629 if (s_job->s_fence->parent &&
630 dma_fence_remove_callback(s_job->s_fence->parent,
631 &s_job->cb)) {
632 dma_fence_put(s_job->s_fence->parent);
633 s_job->s_fence->parent = NULL;
634 atomic_sub(s_job->credits, &sched->credit_count);
635 } else {
636 /*
637 * remove job from pending_list.
638 * Locking here is for concurrent resume timeout
639 */
640 spin_lock(&sched->job_list_lock);
641 list_del_init(&s_job->list);
642 spin_unlock(&sched->job_list_lock);
643
644 /*
645 * Wait for the job's HW fence callback to finish using s_job
646 * before releasing it.
647 *
648 * The job is still alive, so the fence refcount is at least 1.
649 */
650 dma_fence_wait(&s_job->s_fence->finished, false);
651
652 /*
653 * We must keep bad job alive for later use during
654 * recovery by some of the drivers but leave a hint
655 * that the guilty job must be released.
656 */
657 if (bad != s_job)
658 sched->ops->free_job(s_job);
659 else
660 sched->free_guilty = true;
661 }
662 }
663
664 /*
665 * Stop the pending timer in flight, as we rearm it in drm_sched_start(). This
666 * avoids the pending timeout work in progress firing right away after
667 * this TDR finished and before the newly restarted jobs have had a
668 * chance to complete.
669 */
670 cancel_delayed_work(&sched->work_tdr);
671 }
672 EXPORT_SYMBOL(drm_sched_stop);
673
674 /**
675 * drm_sched_start - recover jobs after a reset
676 *
677 * @sched: scheduler instance
678 * @errno: error to set on the pending fences
679 *
680 * This function is typically used for reset recovery (see the documentation of
681 * drm_sched_backend_ops.timedout_job() for details). Do not call it for
682 * scheduler startup. The scheduler itself is fully operational after
683 * drm_sched_init() succeeded.
684 *
685 * As it's only used for reset recovery, drivers must not call this function
686 * in their &struct drm_sched_backend_ops.timedout_job callback when they
687 * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG.
688 */
689 void drm_sched_start(struct drm_gpu_scheduler *sched, int errno)
690 {
691 struct drm_sched_job *s_job, *tmp;
692
693 /*
694 * Locking the list is not required here as the scheduler's work items
695 * are currently not running, so no new jobs are being inserted or
696 * removed. Also concurrent GPU recovers can't run in parallel.
697 */
698 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
699 struct dma_fence *fence = s_job->s_fence->parent;
700
701 atomic_add(s_job->credits, &sched->credit_count);
702
703 if (!fence) {
704 drm_sched_job_done(s_job, errno ?: -ECANCELED);
705 continue;
706 }
707
708 if (dma_fence_add_callback(fence, &s_job->cb,
709 drm_sched_job_done_cb))
710 drm_sched_job_done(s_job, fence->error ?: errno);
711 }
712
713 drm_sched_start_timeout_unlocked(sched);
714 drm_sched_wqueue_start(sched);
715 }
716 EXPORT_SYMBOL(drm_sched_start);
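/*
 * Illustrative sketch of the reset recovery sequence drm_sched_stop() and
 * drm_sched_start() are designed for, as it could look in a driver's
 * &struct drm_sched_backend_ops.timedout_job callback. my_reset_hw() is a
 * hypothetical driver-specific reset function; real drivers usually also
 * restore ring state and may involve several schedulers.
 *
 *	static enum drm_gpu_sched_stat
 *	my_timedout_job(struct drm_sched_job *bad)
 *	{
 *		struct drm_gpu_scheduler *sched = bad->sched;
 *
 *		drm_sched_stop(sched, bad);	// park workers, prune finished jobs
 *		my_reset_hw(sched);		// driver specific reset
 *		drm_sched_start(sched, -ECANCELED); // complete or re-hook pending jobs
 *
 *		return DRM_GPU_SCHED_STAT_RESET;
 *	}
 */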
717
718 /**
719 * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
720 *
721 * @sched: scheduler instance
722 *
723 * Re-submitting jobs was a concept AMD came up with as a cheap way to implement
724 * recovery after a job timeout.
725 *
726 * This turned out to not work very well. First of all, there are many
727 * problems with the dma_fence implementation and requirements: either the
728 * implementation risks deadlocks with core memory management or it violates
729 * documented implementation details of the dma_fence object.
730 *
731 * Drivers can still save and restore their state for recovery operations, but
732 * we shouldn't make this a general scheduler feature around the dma_fence
733 * interface. The suggested driver-side replacement is to use
734 * drm_sched_for_each_pending_job() after stopping the scheduler and implement
735 * their own recovery operations.
736 */
737 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
738 {
739 struct drm_sched_job *s_job, *tmp;
740 uint64_t guilty_context;
741 bool found_guilty = false;
742 struct dma_fence *fence;
743
744 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
745 struct drm_sched_fence *s_fence = s_job->s_fence;
746
747 if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
748 found_guilty = true;
749 guilty_context = s_job->s_fence->scheduled.context;
750 }
751
752 if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
753 dma_fence_set_error(&s_fence->finished, -ECANCELED);
754
755 fence = sched->ops->run_job(s_job);
756
757 if (IS_ERR_OR_NULL(fence)) {
758 if (IS_ERR(fence))
759 dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
760
761 s_job->s_fence->parent = NULL;
762 } else {
763
764 s_job->s_fence->parent = dma_fence_get(fence);
765
766 /* Drop for original kref_init */
767 dma_fence_put(fence);
768 }
769 }
770 }
771 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
772
773 /**
774 * drm_sched_job_init - init a scheduler job
775 * @job: scheduler job to init
776 * @entity: scheduler entity to use
777 * @credits: the number of credits this job contributes to the schedulers
778 * credit limit
779 * @owner: job owner for debugging
780 * @drm_client_id: &struct drm_file.client_id of the owner (used by trace
781 * events)
782 *
783 * Refer to drm_sched_entity_push_job() documentation
784 * for locking considerations.
785 *
786 * Drivers must make sure to call drm_sched_job_cleanup() if this function returns
787 * successfully, even when @job is aborted before drm_sched_job_arm() is called.
788 *
789 * Note that this function does not assign a valid value to each struct member
790 * of struct drm_sched_job. Take a look at that struct's documentation to see
791 * who sets which struct member with what lifetime.
792 *
793 * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
794 * has died, which can mean that there's no valid runqueue for an @entity.
795 * This function returns -ENOENT in this case (which probably should be -EIO as
796 * a more meaningful return value).
797 *
798 * Returns 0 for success, negative error code otherwise.
799 */
800 int drm_sched_job_init(struct drm_sched_job *job,
801 struct drm_sched_entity *entity,
802 u32 credits, void *owner,
803 uint64_t drm_client_id)
804 {
805 if (!entity->rq) {
806 /* This will most likely be followed by missing frames
807 * or worse, a blank screen, so leave a trail in the
808 * logs so that this can be debugged more easily.
809 */
810 dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__);
811 return -ENOENT;
812 }
813
814 if (unlikely(!credits)) {
815 pr_err("*ERROR* %s: credits cannot be 0!\n", __func__);
816 return -EINVAL;
817 }
818
819 /*
820 * We don't know for sure how the user has allocated the job. Thus, zero the
821 * struct so that disallowed (i.e., too early) usage of pointers that
822 * this function does not set is guaranteed to lead to a NULL pointer
823 * dereference instead of undefined behaviour.
824 */
825 memset(job, 0, sizeof(*job));
826
827 job->entity = entity;
828 job->credits = credits;
829 job->s_fence = drm_sched_fence_alloc(entity, owner, drm_client_id);
830 if (!job->s_fence)
831 return -ENOMEM;
832
833 INIT_LIST_HEAD(&job->list);
834
835 xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
836
837 return 0;
838 }
839 EXPORT_SYMBOL(drm_sched_job_init);
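/*
 * Illustrative sketch of the overall submission flow drm_sched_job_init() is
 * part of (error handling trimmed; my_job and client_id are hypothetical
 * driver-side names, and the job here costs a single credit):
 *
 *	ret = drm_sched_job_init(&my_job->base, entity, 1, my_job, client_id);
 *	if (ret)
 *		return ret;
 *
 *	// gather dependencies while the reservation object is locked
 *	ret = drm_sched_job_add_resv_dependencies(&my_job->base, obj->resv,
 *						  DMA_RESV_USAGE_WRITE);
 *	if (ret)
 *		goto err_cleanup;
 *
 *	drm_sched_job_arm(&my_job->base);	// point of no return
 *	drm_sched_entity_push_job(&my_job->base);
 *	return 0;
 *
 * err_cleanup:
 *	drm_sched_job_cleanup(&my_job->base);	// only valid before _arm()
 *	return ret;
 */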
840
841 /**
842 * drm_sched_job_arm - arm a scheduler job for execution
843 * @job: scheduler job to arm
844 *
845 * This arms a scheduler job for execution. Specifically it initializes the
846 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
847 * or other places that need to track the completion of this job. It also
848 * initializes sequence numbers, which are fundamental for fence ordering.
849 *
850 * Refer to drm_sched_entity_push_job() documentation for locking
851 * considerations.
852 *
853 * Once this function was called, you *must* submit @job with
854 * drm_sched_entity_push_job().
855 *
856 * This can only be called if drm_sched_job_init() succeeded.
857 */
858 void drm_sched_job_arm(struct drm_sched_job *job)
859 {
860 struct drm_gpu_scheduler *sched;
861 struct drm_sched_entity *entity = job->entity;
862
863 BUG_ON(!entity);
864 drm_sched_entity_select_rq(entity);
865 sched = entity->rq->sched;
866
867 job->sched = sched;
868 job->s_priority = entity->priority;
869
870 drm_sched_fence_init(job->s_fence, job->entity);
871 }
872 EXPORT_SYMBOL(drm_sched_job_arm);
873
874 /**
875 * drm_sched_job_add_dependency - adds the fence as a job dependency
876 * @job: scheduler job to add the dependencies to
877 * @fence: the dma_fence to add to the list of dependencies.
878 *
879 * Note that @fence is consumed in both the success and error cases.
880 *
881 * Returns:
882 * 0 on success, or an error on failing to expand the array.
883 */
884 int drm_sched_job_add_dependency(struct drm_sched_job *job,
885 struct dma_fence *fence)
886 {
887 struct dma_fence *entry;
888 unsigned long index;
889 u32 id = 0;
890 int ret;
891
892 if (!fence)
893 return 0;
894
895 /* Deduplicate if we already depend on a fence from the same context.
896 * This lets the size of the array of deps scale with the number of
897 * engines involved, rather than the number of BOs.
898 */
899 xa_for_each(&job->dependencies, index, entry) {
900 if (entry->context != fence->context)
901 continue;
902
903 if (dma_fence_is_later(fence, entry)) {
904 dma_fence_put(entry);
905 xa_store(&job->dependencies, index, fence, GFP_KERNEL);
906 } else {
907 dma_fence_put(fence);
908 }
909 return 0;
910 }
911
912 ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
913 if (ret != 0)
914 dma_fence_put(fence);
915
916 return ret;
917 }
918 EXPORT_SYMBOL(drm_sched_job_add_dependency);
919
920 /**
921 * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
922 * @job: scheduler job to add the dependencies to
923 * @file: drm file private pointer
924 * @handle: syncobj handle to lookup
925 * @point: timeline point
926 *
927 * This adds the fence matching the given syncobj to @job.
928 *
929 * Returns:
930 * 0 on success, or an error on failing to expand the array.
931 */
932 int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
933 struct drm_file *file,
934 u32 handle,
935 u32 point)
936 {
937 struct dma_fence *fence;
938 int ret;
939
940 ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
941 if (ret)
942 return ret;
943
944 return drm_sched_job_add_dependency(job, fence);
945 }
946 EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);
947
948 /**
949 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
950 * @job: scheduler job to add the dependencies to
951 * @resv: the dma_resv object to get the fences from
952 * @usage: the dma_resv_usage to use to filter the fences
953 *
954 * This adds all fences matching the given usage from @resv to @job.
955 * Must be called with the @resv lock held.
956 *
957 * Returns:
958 * 0 on success, or an error on failing to expand the array.
959 */
960 int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
961 struct dma_resv *resv,
962 enum dma_resv_usage usage)
963 {
964 struct dma_resv_iter cursor;
965 struct dma_fence *fence;
966 int ret;
967
968 dma_resv_assert_held(resv);
969
970 dma_resv_for_each_fence(&cursor, resv, usage, fence) {
971 /*
972 * As drm_sched_job_add_dependency always consumes the fence
973 * reference (even when it fails), and dma_resv_for_each_fence
974 * is not obtaining one, we need to grab one before calling.
975 */
976 ret = drm_sched_job_add_dependency(job, dma_fence_get(fence));
977 if (ret)
978 return ret;
979 }
980 return 0;
981 }
982 EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);
983
984 /**
985 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
986 * dependencies
987 * @job: scheduler job to add the dependencies to
988 * @obj: the gem object to add new dependencies from.
989 * @write: whether the job might write the object (so we need to depend on
990 * shared fences in the reservation object).
991 *
992 * This should be called after drm_gem_lock_reservations() on your array of
993 * GEM objects used in the job but before updating the reservations with your
994 * own fences.
995 *
996 * Returns:
997 * 0 on success, or an error on failing to expand the array.
998 */
999 int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
1000 struct drm_gem_object *obj,
1001 bool write)
1002 {
1003 return drm_sched_job_add_resv_dependencies(job, obj->resv,
1004 dma_resv_usage_rw(write));
1005 }
1006 EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
1007
1008 /**
1009 * drm_sched_job_has_dependency - check whether fence is the job's dependency
1010 * @job: scheduler job to check
1011 * @fence: fence to look for
1012 *
1013 * Returns:
1014 * True if @fence is found within the job's dependencies, or otherwise false.
1015 */
1016 bool drm_sched_job_has_dependency(struct drm_sched_job *job,
1017 struct dma_fence *fence)
1018 {
1019 struct dma_fence *f;
1020 unsigned long index;
1021
1022 xa_for_each(&job->dependencies, index, f) {
1023 if (f == fence)
1024 return true;
1025 }
1026
1027 return false;
1028 }
1029 EXPORT_SYMBOL(drm_sched_job_has_dependency);
1030
1031 /**
1032 * drm_sched_job_cleanup - clean up scheduler job resources
1033 * @job: scheduler job to clean up
1034 *
1035 * Cleans up the resources allocated with drm_sched_job_init().
1036 *
1037 * Drivers should call this from their error unwind code if @job is aborted
1038 * before drm_sched_job_arm() is called.
1039 *
1040 * drm_sched_job_arm() is a point of no return since it initializes the fences
1041 * and their sequence number etc. Once that function has been called, you *must*
1042 * submit it with drm_sched_entity_push_job() and cannot simply abort it by
1043 * calling drm_sched_job_cleanup().
1044 *
1045 * This function should be called in the &drm_sched_backend_ops.free_job callback.
1046 */
1047 void drm_sched_job_cleanup(struct drm_sched_job *job)
1048 {
1049 struct dma_fence *fence;
1050 unsigned long index;
1051
1052 if (kref_read(&job->s_fence->finished.refcount)) {
1053 /* The job has been processed by the scheduler, i.e.,
1054 * drm_sched_job_arm() and drm_sched_entity_push_job() have
1055 * been called.
1056 */
1057 dma_fence_put(&job->s_fence->finished);
1058 } else {
1059 /* The job was aborted before it has been committed to be run;
1060 * notably, drm_sched_job_arm() has not been called.
1061 */
1062 drm_sched_fence_free(job->s_fence);
1063 }
1064
1065 job->s_fence = NULL;
1066
1067 xa_for_each(&job->dependencies, index, fence) {
1068 dma_fence_put(fence);
1069 }
1070 xa_destroy(&job->dependencies);
1071
1072 }
1073 EXPORT_SYMBOL(drm_sched_job_cleanup);
1074
1075 /**
1076 * drm_sched_wakeup - Wake up the scheduler if it is ready to queue
1077 * @sched: scheduler instance
1078 *
1079 * Wake up the scheduler if we can queue jobs.
1080 */
1081 void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
1082 {
1083 drm_sched_run_job_queue(sched);
1084 }
1085
1086 /**
1087 * drm_sched_select_entity - Select next entity to process
1088 *
1089 * @sched: scheduler instance
1090 *
1091 * Return an entity to process or NULL if none are found.
1092 *
1093 * Note that we break out of the for-loop when "entity" is non-NULL, which can
1094 * also be an error pointer; this ensures we don't process lower priority
1095 * run-queues. See comments in the respective called functions.
1096 */
1097 static struct drm_sched_entity *
1098 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
1099 {
1100 struct drm_sched_entity *entity;
1101 int i;
1102
1103 /* Start with the highest priority.
1104 */
1105 for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1106 entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
1107 drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
1108 drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
1109 if (entity)
1110 break;
1111 }
1112
1113 return IS_ERR(entity) ? NULL : entity;
1114 }
1115
1116 /**
1117 * drm_sched_get_finished_job - fetch the next finished job to be destroyed
1118 *
1119 * @sched: scheduler instance
1120 * @have_more: are there more finished jobs on the list
1121 *
1122 * Informs the caller through @have_more whether there are more finished jobs
1123 * besides the returned one.
1124 *
1125 * Returns the next finished job from the pending list (if there is one),
1126 * ready to be destroyed.
1127 */
1128 static struct drm_sched_job *
1129 drm_sched_get_finished_job(struct drm_gpu_scheduler *sched, bool *have_more)
1130 {
1131 struct drm_sched_job *job, *next;
1132
1133 spin_lock(&sched->job_list_lock);
1134
1135 job = list_first_entry_or_null(&sched->pending_list,
1136 struct drm_sched_job, list);
1137 if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
1138 /* remove job from pending_list */
1139 list_del_init(&job->list);
1140
1141 /* cancel this job's TO timer */
1142 cancel_delayed_work(&sched->work_tdr);
1143
1144 *have_more = false;
1145 next = list_first_entry_or_null(&sched->pending_list,
1146 typeof(*next), list);
1147 if (next) {
1148 /* make the scheduled timestamp more accurate */
1149 if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
1150 &next->s_fence->scheduled.flags))
1151 next->s_fence->scheduled.timestamp =
1152 dma_fence_timestamp(&job->s_fence->finished);
1153
1154 *have_more = dma_fence_is_signaled(&next->s_fence->finished);
1155
1156 /* start TO timer for next job */
1157 drm_sched_start_timeout(sched);
1158 }
1159 } else {
1160 job = NULL;
1161 }
1162
1163 spin_unlock(&sched->job_list_lock);
1164
1165 return job;
1166 }
1167
1168 /**
1169 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
1170 * @sched_list: list of drm_gpu_schedulers
1171 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
1172 *
1173 * Returns a pointer to the sched with the least load, or NULL if none of the
1174 * drm_gpu_schedulers are ready.
1175 */
1176 struct drm_gpu_scheduler *
1177 drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
1178 unsigned int num_sched_list)
1179 {
1180 struct drm_gpu_scheduler *sched, *picked_sched = NULL;
1181 int i;
1182 unsigned int min_score = UINT_MAX, num_score;
1183
1184 for (i = 0; i < num_sched_list; ++i) {
1185 sched = sched_list[i];
1186
1187 if (!sched->ready) {
1188 DRM_WARN("scheduler %s is not ready, skipping",
1189 sched->name);
1190 continue;
1191 }
1192
1193 num_score = atomic_read(sched->score);
1194 if (num_score < min_score) {
1195 min_score = num_score;
1196 picked_sched = sched;
1197 }
1198 }
1199
1200 return picked_sched;
1201 }
1202 EXPORT_SYMBOL(drm_sched_pick_best);
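/*
 * Illustrative sketch: drivers with multiple rings of the same type can use
 * drm_sched_pick_best() to place an entity on the least loaded scheduler.
 * my_rings and MY_NUM_RINGS are hypothetical driver names.
 *
 *	struct drm_gpu_scheduler *sched_list[MY_NUM_RINGS];
 *	struct drm_gpu_scheduler *best;
 *	int i;
 *
 *	for (i = 0; i < MY_NUM_RINGS; i++)
 *		sched_list[i] = &my_rings[i].sched;
 *
 *	best = drm_sched_pick_best(sched_list, MY_NUM_RINGS);
 */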
1203
1204 /**
1205 * drm_sched_free_job_work - worker to call free_job
1206 *
1207 * @w: free job work
1208 */
1209 static void drm_sched_free_job_work(struct work_struct *w)
1210 {
1211 struct drm_gpu_scheduler *sched =
1212 container_of(w, struct drm_gpu_scheduler, work_free_job);
1213 struct drm_sched_job *job;
1214 bool have_more;
1215
1216 job = drm_sched_get_finished_job(sched, &have_more);
1217 if (job) {
1218 sched->ops->free_job(job);
1219 if (have_more)
1220 drm_sched_run_free_queue(sched);
1221 }
1222
1223 drm_sched_run_job_queue(sched);
1224 }
1225
1226 /**
1227 * drm_sched_run_job_work - worker to call run_job
1228 *
1229 * @w: run job work
1230 */
1231 static void drm_sched_run_job_work(struct work_struct *w)
1232 {
1233 struct drm_gpu_scheduler *sched =
1234 container_of(w, struct drm_gpu_scheduler, work_run_job);
1235 struct drm_sched_entity *entity;
1236 struct dma_fence *fence;
1237 struct drm_sched_fence *s_fence;
1238 struct drm_sched_job *sched_job;
1239 int r;
1240
1241 /* Find entity with a ready job */
1242 entity = drm_sched_select_entity(sched);
1243 if (!entity) {
1244 /*
1245 * Either no more work to do, or the next ready job needs more
1246 * credits than the scheduler has currently available.
1247 */
1248 return;
1249 }
1250
1251 sched_job = drm_sched_entity_pop_job(entity);
1252 if (!sched_job) {
1253 complete_all(&entity->entity_idle);
1254 drm_sched_run_job_queue(sched);
1255 return;
1256 }
1257
1258 s_fence = sched_job->s_fence;
1259
1260 atomic_add(sched_job->credits, &sched->credit_count);
1261 drm_sched_job_begin(sched_job);
1262
1263 trace_drm_sched_job_run(sched_job, entity);
1264 /*
1265 * The run_job() callback must by definition return a fence whose
1266 * refcount has been incremented for the scheduler already.
1267 */
1268 fence = sched->ops->run_job(sched_job);
1269 complete_all(&entity->entity_idle);
1270 drm_sched_fence_scheduled(s_fence, fence);
1271
1272 if (!IS_ERR_OR_NULL(fence)) {
1273 r = dma_fence_add_callback(fence, &sched_job->cb,
1274 drm_sched_job_done_cb);
1275 if (r == -ENOENT)
1276 drm_sched_job_done(sched_job, fence->error);
1277 else if (r)
1278 DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
1279
1280 dma_fence_put(fence);
1281 } else {
1282 drm_sched_job_done(sched_job, IS_ERR(fence) ?
1283 PTR_ERR(fence) : 0);
1284 }
1285
1286 wake_up(&sched->job_scheduled);
1287 drm_sched_run_job_queue(sched);
1288 }
1289
1290 static struct workqueue_struct *drm_sched_alloc_wq(const char *name)
1291 {
1292 #if (IS_ENABLED(CONFIG_LOCKDEP))
1293 static struct lockdep_map map = {
1294 .name = "drm_sched_lockdep_map"
1295 };
1296
1297 /*
1298 * Avoid leaking a lockdep map on each drm sched creation and
1299 * destruction by using a single lockdep map for all drm sched
1300 * allocated submit_wq.
1301 */
1302
1303 return alloc_ordered_workqueue_lockdep_map(name, WQ_MEM_RECLAIM, &map);
1304 #else
1305 return alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
1306 #endif
1307 }
1308
1309 /**
1310 * drm_sched_init - Init a gpu scheduler instance
1311 *
1312 * @sched: scheduler instance
1313 * @args: scheduler initialization arguments
1314 *
1315 * Return 0 on success, otherwise error code.
1316 */
1317 int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_args *args)
1318 {
1319 int i;
1320
1321 sched->ops = args->ops;
1322 sched->credit_limit = args->credit_limit;
1323 sched->name = args->name;
1324 sched->timeout = args->timeout;
1325 sched->hang_limit = args->hang_limit;
1326 sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_percpu_wq;
1327 sched->score = args->score ? args->score : &sched->_score;
1328 sched->dev = args->dev;
1329
1330 if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) {
1331 /* This is a gross violation--tell drivers what the problem is.
1332 */
1333 dev_err(sched->dev, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
1334 __func__);
1335 return -EINVAL;
1336 } else if (sched->sched_rq) {
1337 /* Not an error, but warn anyway so drivers can
1338 * fine-tune their DRM calling order, and return all
1339 * is good.
1340 */
1341 dev_warn(sched->dev, "%s: scheduler already initialized!\n", __func__);
1342 return 0;
1343 }
1344
1345 if (args->submit_wq) {
1346 sched->submit_wq = args->submit_wq;
1347 sched->own_submit_wq = false;
1348 } else {
1349 sched->submit_wq = drm_sched_alloc_wq(args->name);
1350 if (!sched->submit_wq)
1351 return -ENOMEM;
1352
1353 sched->own_submit_wq = true;
1354 }
1355
1356 sched->sched_rq = kmalloc_objs(*sched->sched_rq, args->num_rqs,
1357 GFP_KERNEL | __GFP_ZERO);
1358 if (!sched->sched_rq)
1359 goto Out_check_own;
1360 sched->num_rqs = args->num_rqs;
1361 for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1362 sched->sched_rq[i] = kzalloc_obj(*sched->sched_rq[i]);
1363 if (!sched->sched_rq[i])
1364 goto Out_unroll;
1365 drm_sched_rq_init(sched, sched->sched_rq[i]);
1366 }
1367
1368 init_waitqueue_head(&sched->job_scheduled);
1369 INIT_LIST_HEAD(&sched->pending_list);
1370 spin_lock_init(&sched->job_list_lock);
1371 atomic_set(&sched->credit_count, 0);
1372 INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
1373 INIT_WORK(&sched->work_run_job, drm_sched_run_job_work);
1374 INIT_WORK(&sched->work_free_job, drm_sched_free_job_work);
1375 atomic_set(&sched->_score, 0);
1376 atomic64_set(&sched->job_id_count, 0);
1377 sched->pause_submit = false;
1378
1379 sched->ready = true;
1380 return 0;
1381 Out_unroll:
1382 for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
1383 kfree(sched->sched_rq[i]);
1384
1385 kfree(sched->sched_rq);
1386 sched->sched_rq = NULL;
1387 Out_check_own:
1388 if (sched->own_submit_wq)
1389 destroy_workqueue(sched->submit_wq);
1390 dev_err(sched->dev, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
1391 return -ENOMEM;
1392 }
1393 EXPORT_SYMBOL(drm_sched_init);
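/*
 * Illustrative sketch of a typical drm_sched_init() call; the concrete values
 * and the my_ring / my_sched_ops / my_* callback names are hypothetical:
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job      = my_run_job,
 *		.timedout_job = my_timedout_job,
 *		.free_job     = my_free_job,
 *	};
 *
 *	const struct drm_sched_init_args args = {
 *		.ops = &my_sched_ops,
 *		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
 *		.credit_limit = 64,
 *		.hang_limit = 0,
 *		.timeout = msecs_to_jiffies(500),
 *		.name = "my-ring",
 *		.dev = drm->dev,
 *	};
 *
 *	ret = drm_sched_init(&my_ring->sched, &args);
 */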
1394
1395 static void drm_sched_cancel_remaining_jobs(struct drm_gpu_scheduler *sched)
1396 {
1397 struct drm_sched_job *job, *tmp;
1398
1399 /* All other accessors are stopped. No locking necessary. */
1400 list_for_each_entry_safe_reverse(job, tmp, &sched->pending_list, list) {
1401 sched->ops->cancel_job(job);
1402 list_del(&job->list);
1403 sched->ops->free_job(job);
1404 }
1405 }
1406
1407 /**
1408 * drm_sched_fini - Destroy a gpu scheduler
1409 *
1410 * @sched: scheduler instance
1411 *
1412 * Tears down and cleans up the scheduler.
1413 *
1414 * This stops submission of new jobs to the hardware through &struct
1415 * drm_sched_backend_ops.run_job. If &struct drm_sched_backend_ops.cancel_job
1416 * is implemented, all jobs will be canceled through it and afterwards cleaned
1417 * up through &struct drm_sched_backend_ops.free_job. If cancel_job is not
1418 * implemented, memory could leak.
1419 */
1420 void drm_sched_fini(struct drm_gpu_scheduler *sched)
1421 {
1422 struct drm_sched_entity *s_entity;
1423 int i;
1424
1425 drm_sched_wqueue_stop(sched);
1426
1427 for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1428 struct drm_sched_rq *rq = sched->sched_rq[i];
1429
1430 spin_lock(&rq->lock);
1431 list_for_each_entry(s_entity, &rq->entities, list) {
1432 /*
1433 * Prevents reinsertion and marks job_queue as idle,
1434 * it will be removed from the rq in drm_sched_entity_fini()
1435 * eventually
1436 *
1437 * FIXME:
1438 * This lacks the proper spin_lock(&s_entity->lock) and
1439 * is, therefore, a race condition. Most notably, it
1440 * can race with drm_sched_entity_push_job(). The lock
1441 * cannot be taken here, however, because this would
1442 * lead to lock inversion -> deadlock.
1443 *
1444 * The best solution probably is to enforce the lifetime
1445 * rule of all entities having to be torn down
1446 * before their scheduler. Then, however, locking could
1447 * be dropped altogether from this function.
1448 *
1449 * For now, this remains a potential race in all
1450 * drivers that keep entities alive for longer than
1451 * the scheduler.
1452 *
1453 * The READ_ONCE() is there to make the lockless read
1454 * (warning about the lockless write below) slightly
1455 * less broken...
1456 */
1457 if (!READ_ONCE(s_entity->stopped))
1458 dev_warn(sched->dev, "Tearing down scheduler with active entities!\n");
1459 s_entity->stopped = true;
1460 }
1461 spin_unlock(&rq->lock);
1462 kfree(sched->sched_rq[i]);
1463 }
1464
1465 /* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
1466 wake_up_all(&sched->job_scheduled);
1467
1468 /* Confirm no work left behind accessing device structures */
1469 cancel_delayed_work_sync(&sched->work_tdr);
1470
1471 /* Avoid memory leaks if supported by the driver. */
1472 if (sched->ops->cancel_job)
1473 drm_sched_cancel_remaining_jobs(sched);
1474
1475 if (sched->own_submit_wq)
1476 destroy_workqueue(sched->submit_wq);
1477 sched->ready = false;
1478 kfree(sched->sched_rq);
1479 sched->sched_rq = NULL;
1480
1481 if (!list_empty(&sched->pending_list))
1482 dev_warn(sched->dev, "Tearing down scheduler while jobs are pending!\n");
1483 }
1484 EXPORT_SYMBOL(drm_sched_fini);
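/*
 * Illustrative sketch of the teardown ordering implied by the comments above:
 * entities should be torn down before the scheduler they are attached to.
 * my_context and my_ring are hypothetical driver structures.
 *
 *	// first stop and remove all entities feeding this scheduler
 *	drm_sched_entity_fini(&my_context->entity);
 *	...
 *	// only then tear down the scheduler itself
 *	drm_sched_fini(&my_ring->sched);
 */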
1485
1486 /**
1487 * drm_sched_increase_karma - Update sched_entity guilty flag
1488 *
1489 * @bad: The job guilty of time out
1490 *
1491 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
1492 * limit of the scheduler then the respective sched entity is marked guilty and
1493 * jobs from it will not be scheduled further
1494 */
1495 void drm_sched_increase_karma(struct drm_sched_job *bad)
1496 {
1497 int i;
1498 struct drm_sched_entity *tmp;
1499 struct drm_sched_entity *entity;
1500 struct drm_gpu_scheduler *sched = bad->sched;
1501
1502 /* Don't change @bad's karma if it's from the KERNEL RQ,
1503 * because sometimes a GPU hang would cause kernel jobs (like VM updating jobs)
1504 * to be corrupted, but keep in mind that kernel jobs are always considered good.
1505 */
1506 if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
1507 atomic_inc(&bad->karma);
1508
1509 for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
1510 struct drm_sched_rq *rq = sched->sched_rq[i];
1511
1512 spin_lock(&rq->lock);
1513 list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
1514 if (bad->s_fence->scheduled.context ==
1515 entity->fence_context) {
1516 if (entity->guilty)
1517 atomic_set(entity->guilty, 1);
1518 break;
1519 }
1520 }
1521 spin_unlock(&rq->lock);
1522 if (&entity->list != &rq->entities)
1523 break;
1524 }
1525 }
1526 }
1527 EXPORT_SYMBOL(drm_sched_increase_karma);
1528
1529 /**
1530 * drm_sched_wqueue_ready - Is the scheduler ready for submission
1531 *
1532 * @sched: scheduler instance
1533 *
1534 * Returns true if submission is ready
1535 */
1536 bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched)
1537 {
1538 return sched->ready;
1539 }
1540 EXPORT_SYMBOL(drm_sched_wqueue_ready);
1541
1542 /**
1543 * drm_sched_wqueue_stop - stop scheduler submission
1544 * @sched: scheduler instance
1545 *
1546 * Stops the scheduler from pulling new jobs from entities. It also stops
1547 * freeing jobs automatically through drm_sched_backend_ops.free_job().
1548 */
1549 void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
1550 {
1551 WRITE_ONCE(sched->pause_submit, true);
1552 cancel_work_sync(&sched->work_run_job);
1553 cancel_work_sync(&sched->work_free_job);
1554 }
1555 EXPORT_SYMBOL(drm_sched_wqueue_stop);
1556
1557 /**
1558 * drm_sched_wqueue_start - start scheduler submission
1559 * @sched: scheduler instance
1560 *
1561 * Restarts the scheduler after drm_sched_wqueue_stop() has stopped it.
1562 *
1563 * This function is not necessary for 'conventional' startup. The scheduler is
1564 * fully operational after drm_sched_init() succeeded.
1565 */
1566 void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
1567 {
1568 WRITE_ONCE(sched->pause_submit, false);
1569 queue_work(sched->submit_wq, &sched->work_run_job);
1570 queue_work(sched->submit_wq, &sched->work_free_job);
1571 }
1572 EXPORT_SYMBOL(drm_sched_wqueue_start);
1573
1574 /**
1575 * drm_sched_is_stopped() - Checks whether drm_sched is stopped
1576 * @sched: DRM scheduler
1577 *
1578 * Return: true if sched is stopped, false otherwise
1579 */
1580 bool drm_sched_is_stopped(struct drm_gpu_scheduler *sched)
1581 {
1582 return READ_ONCE(sched->pause_submit);
1583 }
1584 EXPORT_SYMBOL(drm_sched_is_stopped);
1585
1586 /**
1587 * drm_sched_job_is_signaled() - DRM scheduler job is signaled
1588 * @job: DRM scheduler job
1589 *
1590 * Determine whether a DRM scheduler job is signaled. The DRM scheduler should be
1591 * stopped to obtain a stable snapshot of state. Both the parent fence (hardware
1592 * fence) and the finished fence (software fence) are checked for the signaling state.
1593 *
1594 * Return: true if job is signaled, false otherwise
1595 */
1596 bool drm_sched_job_is_signaled(struct drm_sched_job *job)
1597 {
1598 struct drm_sched_fence *s_fence = job->s_fence;
1599
1600 WARN_ON(!drm_sched_is_stopped(job->sched));
1601 return (s_fence->parent && dma_fence_is_signaled(s_fence->parent)) ||
1602 dma_fence_is_signaled(&s_fence->finished);
1603 }
1604 EXPORT_SYMBOL(drm_sched_job_is_signaled);
1605