xref: /linux/drivers/gpu/drm/scheduler/sched_main.c (revision 5d95cbf21a4a550f2a2050c947083de2587cf46d)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 /**
25  * DOC: Overview
26  *
27  * The GPU scheduler provides entities which allow userspace to push jobs
28  * into software queues which are then scheduled on a hardware run queue.
29  * The software queues have a priority among them. The scheduler selects entities
30  * from the run queue using a FIFO. The scheduler provides dependency handling
31  * features among jobs. The driver is supposed to provide callback functions to
32  * the scheduler for backend operations, like submitting a job to the hardware
33  * run queue, returning the dependencies of a job, etc.
34  *
35  * The organisation of the scheduler is the following:
36  *
37  * 1. Each hw run queue has one scheduler
38  * 2. Each scheduler has multiple run queues with different priorities
39  *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
40  * 3. Each scheduler run queue has a queue of entities to schedule
41  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
42  *    the hardware.
43  *
44  * The jobs in an entity are always scheduled in the order in which they were pushed.
45  *
46  * Note that once a job has been taken from the entity's queue and pushed to
47  * the hardware, i.e. the pending queue, the entity must not be referenced
48  * anymore through the job's entity pointer.
49  */
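
/*
 * A minimal sketch of the driver-side wiring described above. The foo_*
 * identifiers below are hypothetical driver code, not part of the scheduler;
 * only the &struct drm_sched_backend_ops callbacks themselves are real:
 *
 *	static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
 *	{
 *		return foo_hw_submit(to_foo_job(sched_job));
 *	}
 *
 *	static enum drm_gpu_sched_stat
 *	foo_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		foo_recover(sched_job);
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 *
 *	static void foo_free_job(struct drm_sched_job *sched_job)
 *	{
 *		drm_sched_job_cleanup(sched_job);
 *		kfree(to_foo_job(sched_job));
 *	}
 *
 *	static const struct drm_sched_backend_ops foo_sched_ops = {
 *		.run_job	= foo_run_job,
 *		.timedout_job	= foo_timedout_job,
 *		.free_job	= foo_free_job,
 *	};
 */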
50 
51 /**
52  * DOC: Flow Control
53  *
54  * The DRM GPU scheduler provides a flow control mechanism to regulate the rate
55  * at which the jobs fetched from scheduler entities are executed.
56  *
57  * In this context the &drm_gpu_scheduler keeps track of a driver specified
58  * credit limit representing the capacity of this scheduler and a credit count;
59  * every &drm_sched_job carries a driver specified number of credits.
60  *
61  * Once a job is executed (but not yet finished), the job's credits contribute
62  * to the scheduler's credit count until the job is finished. If by executing
63  * one more job the scheduler's credit count would exceed the scheduler's
64  * credit limit, the job won't be executed. Instead, the scheduler will wait
65  * until the credit count has decreased enough to not overflow its credit limit.
66  * This implies waiting for previously executed jobs.
67  */
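
/*
 * A rough worked example (the numbers are made up for illustration): with a
 * driver-specified credit limit of 8 and jobs that each carry 3 credits,
 *
 *	job 1 runs:	credit count 0 -> 3
 *	job 2 runs:	credit count 3 -> 6
 *	job 3 would raise the credit count to 9, exceeding the limit of 8, so
 *	it is held back until job 1 or job 2 finishes and returns its credits.
 */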
68 
69 #include <linux/export.h>
70 #include <linux/wait.h>
71 #include <linux/sched.h>
72 #include <linux/completion.h>
73 #include <linux/dma-resv.h>
74 #include <uapi/linux/sched/types.h>
75 
76 #include <drm/drm_print.h>
77 #include <drm/drm_gem.h>
78 #include <drm/drm_syncobj.h>
79 #include <drm/gpu_scheduler.h>
80 #include <drm/spsc_queue.h>
81 
82 #include "sched_internal.h"
83 
84 #define CREATE_TRACE_POINTS
85 #include "gpu_scheduler_trace.h"
86 
87 int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
88 
89 /**
90  * DOC: sched_policy (int)
91  * Used to override the default scheduling policy for entities in a run queue.
92  */
93 MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
94 module_param_named(sched_policy, drm_sched_policy, int, 0444);
95 
96 static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
97 {
98 	u32 credits;
99 
100 	WARN_ON(check_sub_overflow(sched->credit_limit,
101 				   atomic_read(&sched->credit_count),
102 				   &credits));
103 
104 	return credits;
105 }
106 
107 /**
108  * drm_sched_can_queue -- Can we queue more to the hardware?
109  * @sched: scheduler instance
110  * @entity: the scheduler entity
111  *
112  * Return true if we can push at least one more job from @entity, false
113  * otherwise.
114  */
115 static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
116 				struct drm_sched_entity *entity)
117 {
118 	struct drm_sched_job *s_job;
119 
120 	s_job = drm_sched_entity_queue_peek(entity);
121 	if (!s_job)
122 		return false;
123 
124 	/* If a job exceeds the credit limit, truncate it to the credit limit
125 	 * itself to guarantee forward progress.
126 	 */
127 	if (s_job->credits > sched->credit_limit) {
128 		dev_WARN(sched->dev,
129 			 "Jobs may not exceed the credit limit, truncate.\n");
130 		s_job->credits = sched->credit_limit;
131 	}
132 
133 	return drm_sched_available_credits(sched) >= s_job->credits;
134 }
135 
136 static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
137 							    const struct rb_node *b)
138 {
139 	struct drm_sched_entity *ent_a =  rb_entry((a), struct drm_sched_entity, rb_tree_node);
140 	struct drm_sched_entity *ent_b =  rb_entry((b), struct drm_sched_entity, rb_tree_node);
141 
142 	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
143 }
144 
145 static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
146 					    struct drm_sched_rq *rq)
147 {
148 	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
149 		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
150 		RB_CLEAR_NODE(&entity->rb_tree_node);
151 	}
152 }
153 
154 void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
155 				     struct drm_sched_rq *rq,
156 				     ktime_t ts)
157 {
158 	/*
159 	 * Both locks need to be grabbed, one to protect against entity->rq
160 	 * changing from within a concurrent drm_sched_entity_select_rq() and the
161 	 * other to update the rb tree structure.
162 	 */
163 	lockdep_assert_held(&entity->lock);
164 	lockdep_assert_held(&rq->lock);
165 
166 	drm_sched_rq_remove_fifo_locked(entity, rq);
167 
168 	entity->oldest_job_waiting = ts;
169 
170 	rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root,
171 		      drm_sched_entity_compare_before);
172 }
173 
174 /**
175  * drm_sched_rq_init - initialize a given run queue struct
176  *
177  * @sched: scheduler instance to associate with this run queue
178  * @rq: scheduler run queue
179  *
180  * Initializes a scheduler runqueue.
181  */
182 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
183 			      struct drm_sched_rq *rq)
184 {
185 	spin_lock_init(&rq->lock);
186 	INIT_LIST_HEAD(&rq->entities);
187 	rq->rb_tree_root = RB_ROOT_CACHED;
188 	rq->current_entity = NULL;
189 	rq->sched = sched;
190 }
191 
192 /**
193  * drm_sched_rq_add_entity - add an entity
194  *
195  * @rq: scheduler run queue
196  * @entity: scheduler entity
197  *
198  * Adds a scheduler entity to the run queue.
199  */
200 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
201 			     struct drm_sched_entity *entity)
202 {
203 	lockdep_assert_held(&entity->lock);
204 	lockdep_assert_held(&rq->lock);
205 
206 	if (!list_empty(&entity->list))
207 		return;
208 
209 	atomic_inc(rq->sched->score);
210 	list_add_tail(&entity->list, &rq->entities);
211 }
212 
213 /**
214  * drm_sched_rq_remove_entity - remove an entity
215  *
216  * @rq: scheduler run queue
217  * @entity: scheduler entity
218  *
219  * Removes a scheduler entity from the run queue.
220  */
221 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
222 				struct drm_sched_entity *entity)
223 {
224 	lockdep_assert_held(&entity->lock);
225 
226 	if (list_empty(&entity->list))
227 		return;
228 
229 	spin_lock(&rq->lock);
230 
231 	atomic_dec(rq->sched->score);
232 	list_del_init(&entity->list);
233 
234 	if (rq->current_entity == entity)
235 		rq->current_entity = NULL;
236 
237 	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
238 		drm_sched_rq_remove_fifo_locked(entity, rq);
239 
240 	spin_unlock(&rq->lock);
241 }
242 
243 /**
244  * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
245  *
246  * @sched: the gpu scheduler
247  * @rq: scheduler run queue to check.
248  *
249  * Try to find the next ready entity.
250  *
251  * Return an entity if one is found; return an error-pointer (!NULL) if an
252  * entity was ready, but the scheduler had insufficient credits to accommodate
253  * its job; return NULL if no ready entity was found.
254  */
255 static struct drm_sched_entity *
256 drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
257 			      struct drm_sched_rq *rq)
258 {
259 	struct drm_sched_entity *entity;
260 
261 	spin_lock(&rq->lock);
262 
263 	entity = rq->current_entity;
264 	if (entity) {
265 		list_for_each_entry_continue(entity, &rq->entities, list) {
266 			if (drm_sched_entity_is_ready(entity)) {
267 				/* If we can't queue yet, preserve the current
268 				 * entity in terms of fairness.
269 				 */
270 				if (!drm_sched_can_queue(sched, entity)) {
271 					spin_unlock(&rq->lock);
272 					return ERR_PTR(-ENOSPC);
273 				}
274 
275 				rq->current_entity = entity;
276 				reinit_completion(&entity->entity_idle);
277 				spin_unlock(&rq->lock);
278 				return entity;
279 			}
280 		}
281 	}
282 
283 	list_for_each_entry(entity, &rq->entities, list) {
284 		if (drm_sched_entity_is_ready(entity)) {
285 			/* If we can't queue yet, preserve the current entity in
286 			 * terms of fairness.
287 			 */
288 			if (!drm_sched_can_queue(sched, entity)) {
289 				spin_unlock(&rq->lock);
290 				return ERR_PTR(-ENOSPC);
291 			}
292 
293 			rq->current_entity = entity;
294 			reinit_completion(&entity->entity_idle);
295 			spin_unlock(&rq->lock);
296 			return entity;
297 		}
298 
299 		if (entity == rq->current_entity)
300 			break;
301 	}
302 
303 	spin_unlock(&rq->lock);
304 
305 	return NULL;
306 }
307 
308 /**
309  * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
310  *
311  * @sched: the gpu scheduler
312  * @rq: scheduler run queue to check.
313  *
314  * Find oldest waiting ready entity.
315  *
316  * Return an entity if one is found; return an error-pointer (!NULL) if an
317  * entity was ready, but the scheduler had insufficient credits to accommodate
318  * its job; return NULL if no ready entity was found.
319  */
320 static struct drm_sched_entity *
321 drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched,
322 				struct drm_sched_rq *rq)
323 {
324 	struct rb_node *rb;
325 
326 	spin_lock(&rq->lock);
327 	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
328 		struct drm_sched_entity *entity;
329 
330 		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
331 		if (drm_sched_entity_is_ready(entity)) {
332 			/* If we can't queue yet, preserve the current entity in
333 			 * terms of fairness.
334 			 */
335 			if (!drm_sched_can_queue(sched, entity)) {
336 				spin_unlock(&rq->lock);
337 				return ERR_PTR(-ENOSPC);
338 			}
339 
340 			reinit_completion(&entity->entity_idle);
341 			break;
342 		}
343 	}
344 	spin_unlock(&rq->lock);
345 
346 	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
347 }
348 
349 /**
350  * drm_sched_run_job_queue - enqueue run-job work
351  * @sched: scheduler instance
352  */
353 static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
354 {
355 	if (!READ_ONCE(sched->pause_submit))
356 		queue_work(sched->submit_wq, &sched->work_run_job);
357 }
358 
359 /**
360  * __drm_sched_run_free_queue - enqueue free-job work
361  * @sched: scheduler instance
362  */
363 static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
364 {
365 	if (!READ_ONCE(sched->pause_submit))
366 		queue_work(sched->submit_wq, &sched->work_free_job);
367 }
368 
369 /**
370  * drm_sched_run_free_queue - enqueue free-job work if ready
371  * @sched: scheduler instance
372  */
373 static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
374 {
375 	struct drm_sched_job *job;
376 
377 	spin_lock(&sched->job_list_lock);
378 	job = list_first_entry_or_null(&sched->pending_list,
379 				       struct drm_sched_job, list);
380 	if (job && dma_fence_is_signaled(&job->s_fence->finished))
381 		__drm_sched_run_free_queue(sched);
382 	spin_unlock(&sched->job_list_lock);
383 }
384 
385 /**
386  * drm_sched_job_done - complete a job
387  * @s_job: pointer to the job which is done
388  *
389  * Finish the job's fence and resubmit the work items.
390  */
391 static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
392 {
393 	struct drm_sched_fence *s_fence = s_job->s_fence;
394 	struct drm_gpu_scheduler *sched = s_fence->sched;
395 
396 	atomic_sub(s_job->credits, &sched->credit_count);
397 	atomic_dec(sched->score);
398 
399 	trace_drm_sched_job_done(s_fence);
400 
401 	dma_fence_get(&s_fence->finished);
402 	drm_sched_fence_finished(s_fence, result);
403 	dma_fence_put(&s_fence->finished);
404 	__drm_sched_run_free_queue(sched);
405 }
406 
407 /**
408  * drm_sched_job_done_cb - the callback for a done job
409  * @f: fence
410  * @cb: fence callbacks
411  */
412 static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
413 {
414 	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
415 
416 	drm_sched_job_done(s_job, f->error);
417 }
418 
419 /**
420  * drm_sched_start_timeout - start timeout for reset worker
421  *
422  * @sched: scheduler instance to start the worker for
423  *
424  * Start the timeout for the given scheduler.
425  */
426 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
427 {
428 	lockdep_assert_held(&sched->job_list_lock);
429 
430 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
431 	    !list_empty(&sched->pending_list))
432 		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
433 }
434 
435 static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched)
436 {
437 	spin_lock(&sched->job_list_lock);
438 	drm_sched_start_timeout(sched);
439 	spin_unlock(&sched->job_list_lock);
440 }
441 
442 /**
443  * drm_sched_tdr_queue_imm - immediately start job timeout handler
444  *
445  * @sched: scheduler for which the timeout handling should be started.
446  *
447  * Start timeout handling immediately for the named scheduler.
448  */
449 void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched)
450 {
451 	spin_lock(&sched->job_list_lock);
452 	sched->timeout = 0;
453 	drm_sched_start_timeout(sched);
454 	spin_unlock(&sched->job_list_lock);
455 }
456 EXPORT_SYMBOL(drm_sched_tdr_queue_imm);
457 
458 /**
459  * drm_sched_fault - immediately start timeout handler
460  *
461  * @sched: scheduler where the timeout handling should be started.
462  *
463  * Start timeout handling immediately when the driver detects a hardware fault.
464  */
465 void drm_sched_fault(struct drm_gpu_scheduler *sched)
466 {
467 	if (sched->timeout_wq)
468 		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
469 }
470 EXPORT_SYMBOL(drm_sched_fault);
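
/*
 * For instance, a hypothetical driver might call this from its fault
 * interrupt handler (the foo_* names are placeholders, not a real driver):
 *
 *	static irqreturn_t foo_fault_irq(int irq, void *arg)
 *	{
 *		struct foo_device *foo = arg;
 *
 *		drm_sched_fault(&foo->sched);
 *		return IRQ_HANDLED;
 *	}
 */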
471 
472 /**
473  * drm_sched_suspend_timeout - Suspend scheduler job timeout
474  *
475  * @sched: scheduler instance for which to suspend the timeout
476  *
477  * Suspend the delayed work timeout for the scheduler. This is done by
478  * modifying the delayed work timeout to an arbitrarily large value,
479  * MAX_SCHEDULE_TIMEOUT in this case.
480  *
481  * Returns the remaining timeout.
482  *
483  */
484 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
485 {
486 	unsigned long sched_timeout, now = jiffies;
487 
488 	sched_timeout = sched->work_tdr.timer.expires;
489 
490 	/*
491 	 * Modify the timeout to an arbitrarily large value. This also prevents
492 	 * the timeout from being restarted when new submissions arrive.
493 	 */
494 	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
495 			&& time_after(sched_timeout, now))
496 		return sched_timeout - now;
497 	else
498 		return sched->timeout;
499 }
500 EXPORT_SYMBOL(drm_sched_suspend_timeout);
501 
502 /**
503  * drm_sched_resume_timeout - Resume scheduler job timeout
504  *
505  * @sched: scheduler instance for which to resume the timeout
506  * @remaining: remaining timeout
507  *
508  * Resume the delayed work timeout for the scheduler.
509  */
510 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
511 		unsigned long remaining)
512 {
513 	spin_lock(&sched->job_list_lock);
514 
515 	if (list_empty(&sched->pending_list))
516 		cancel_delayed_work(&sched->work_tdr);
517 	else
518 		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);
519 
520 	spin_unlock(&sched->job_list_lock);
521 }
522 EXPORT_SYMBOL(drm_sched_resume_timeout);
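
/*
 * The two helpers above are meant to be used as a pair; a hedged sketch in
 * hypothetical driver code (foo_* is a placeholder):
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(&foo->sched);
 *	foo_do_maintenance(foo);
 *	drm_sched_resume_timeout(&foo->sched, remaining);
 */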
523 
524 static void drm_sched_job_begin(struct drm_sched_job *s_job)
525 {
526 	struct drm_gpu_scheduler *sched = s_job->sched;
527 
528 	spin_lock(&sched->job_list_lock);
529 	list_add_tail(&s_job->list, &sched->pending_list);
530 	drm_sched_start_timeout(sched);
531 	spin_unlock(&sched->job_list_lock);
532 }
533 
534 static void drm_sched_job_timedout(struct work_struct *work)
535 {
536 	struct drm_gpu_scheduler *sched;
537 	struct drm_sched_job *job;
538 	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;
539 
540 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
541 
542 	/* Protects against concurrent deletion in drm_sched_get_finished_job */
543 	spin_lock(&sched->job_list_lock);
544 	job = list_first_entry_or_null(&sched->pending_list,
545 				       struct drm_sched_job, list);
546 
547 	if (job) {
548 		/*
549 		 * Remove the bad job so it cannot be freed by a concurrent
550 		 * &struct drm_sched_backend_ops.free_job. It will be
551 		 * reinserted after the scheduler's work items have been
552 		 * cancelled, at which point it's safe.
553 		 */
554 		list_del_init(&job->list);
555 		spin_unlock(&sched->job_list_lock);
556 
557 		status = job->sched->ops->timedout_job(job);
558 
559 		/*
560 		 * The guilty job did complete and hence needs to be manually
561 		 * removed. See the drm_sched_stop() documentation.
562 		 */
563 		if (sched->free_guilty) {
564 			job->sched->ops->free_job(job);
565 			sched->free_guilty = false;
566 		}
567 	} else {
568 		spin_unlock(&sched->job_list_lock);
569 	}
570 
571 	if (status != DRM_GPU_SCHED_STAT_ENODEV)
572 		drm_sched_start_timeout_unlocked(sched);
573 }
574 
575 /**
576  * drm_sched_stop - stop the scheduler
577  *
578  * @sched: scheduler instance
579  * @bad: job which caused the time out
580  *
581  * Stop the scheduler, and also remove and free all completed jobs.
582  * Note: the bad job will not be freed, as it might be used later, and so it
583  * is the caller's responsibility to release it manually if it is no longer
584  * part of the pending list.
585  *
586  * This function is typically used for reset recovery (see the docu of
587  * drm_sched_backend_ops.timedout_job() for details). Do not call it for
588  * scheduler teardown, i.e., before calling drm_sched_fini().
589  */
590 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
591 {
592 	struct drm_sched_job *s_job, *tmp;
593 
594 	drm_sched_wqueue_stop(sched);
595 
596 	/*
597 	 * Reinsert the bad job here - now it's safe as
598 	 * drm_sched_get_finished_job() cannot race against us and release the
599 	 * bad job at this point - we parked (waited for) any in progress
600 	 * (earlier) cleanups and drm_sched_get_finished_job() will not be
601 	 * called now until the scheduler's work items are submitted again.
602 	 */
603 	if (bad && bad->sched == sched)
604 		/*
605 		 * Add at the head of the queue to reflect it was the earliest
606 		 * job extracted.
607 		 */
608 		list_add(&bad->list, &sched->pending_list);
609 
610 	/*
611 	 * Iterate the job list from later to earlier and either deactivate the
612 	 * jobs' HW callbacks or remove them from the pending list if they have
613 	 * already signaled.
614 	 * This iteration is thread safe as the scheduler's work items have been
615 	 * cancelled.
616 	 */
617 	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
618 					 list) {
619 		if (s_job->s_fence->parent &&
620 		    dma_fence_remove_callback(s_job->s_fence->parent,
621 					      &s_job->cb)) {
622 			dma_fence_put(s_job->s_fence->parent);
623 			s_job->s_fence->parent = NULL;
624 			atomic_sub(s_job->credits, &sched->credit_count);
625 		} else {
626 			/*
627 			 * remove job from pending_list.
628 			 * Locking here is for concurrent resume timeout
629 			 */
630 			spin_lock(&sched->job_list_lock);
631 			list_del_init(&s_job->list);
632 			spin_unlock(&sched->job_list_lock);
633 
634 			/*
635 			 * Wait for job's HW fence callback to finish using s_job
636 			 * before releasing it.
637 			 *
638 			 * The job is still alive, so the fence refcount is at least 1.
639 			 */
640 			dma_fence_wait(&s_job->s_fence->finished, false);
641 
642 			/*
643 			 * We must keep bad job alive for later use during
644 			 * recovery by some of the drivers but leave a hint
645 			 * that the guilty job must be released.
646 			 */
647 			if (bad != s_job)
648 				sched->ops->free_job(s_job);
649 			else
650 				sched->free_guilty = true;
651 		}
652 	}
653 
654 	/*
655 	 * Stop the pending timer in flight, as we rearm it in drm_sched_start().
656 	 * This prevents the pending timeout work in progress from firing right
657 	 * away after this TDR has finished and before the newly restarted jobs
658 	 * have had a chance to complete.
659 	 */
660 	cancel_delayed_work(&sched->work_tdr);
661 }
662 EXPORT_SYMBOL(drm_sched_stop);
663 
664 /**
665  * drm_sched_start - recover jobs after a reset
666  *
667  * @sched: scheduler instance
668  * @errno: error to set on the pending fences
669  *
670  * This function is typically used for reset recovery (see the docu of
671  * drm_sched_backend_ops.timedout_job() for details). Do not call it for
672  * scheduler startup. The scheduler itself is fully operational after
673  * drm_sched_init() succeeded.
674  */
675 void drm_sched_start(struct drm_gpu_scheduler *sched, int errno)
676 {
677 	struct drm_sched_job *s_job, *tmp;
678 
679 	/*
680 	 * Locking the list is not required here as the scheduler's work items
681 	 * are currently not running, so no new jobs are being inserted or
682 	 * removed. Also, concurrent GPU recoveries can't run in parallel.
683 	 */
684 	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
685 		struct dma_fence *fence = s_job->s_fence->parent;
686 
687 		atomic_add(s_job->credits, &sched->credit_count);
688 
689 		if (!fence) {
690 			drm_sched_job_done(s_job, errno ?: -ECANCELED);
691 			continue;
692 		}
693 
694 		if (dma_fence_add_callback(fence, &s_job->cb,
695 					   drm_sched_job_done_cb))
696 			drm_sched_job_done(s_job, fence->error ?: errno);
697 	}
698 
699 	drm_sched_start_timeout_unlocked(sched);
700 	drm_sched_wqueue_start(sched);
701 }
702 EXPORT_SYMBOL(drm_sched_start);
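
/*
 * A hedged sketch of how drm_sched_stop() and drm_sched_start() are typically
 * paired inside a &struct drm_sched_backend_ops.timedout_job callback; the
 * foo_* names are hypothetical driver code:
 *
 *	static enum drm_gpu_sched_stat foo_timedout_job(struct drm_sched_job *bad)
 *	{
 *		struct drm_gpu_scheduler *sched = bad->sched;
 *
 *		drm_sched_stop(sched, bad);
 *		foo_hw_reset(to_foo_device(sched));
 *		drm_sched_start(sched, 0);
 *
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 */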
703 
704 /**
705  * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
706  *
707  * @sched: scheduler instance
708  *
709  * Re-submitting jobs was a concept AMD came up with as a cheap way to
710  * implement recovery after a job timeout.
711  *
712  * This turned out not to work very well. First of all, there are many
713  * problems with the dma_fence implementation and its requirements. Either the
714  * implementation risks deadlocks with core memory management or violates
715  * documented implementation details of the dma_fence object.
716  *
717  * Drivers can still save and restore their state for recovery operations, but
718  * we shouldn't make this a general scheduler feature around the dma_fence
719  * interface.
720  */
721 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
722 {
723 	struct drm_sched_job *s_job, *tmp;
724 	uint64_t guilty_context;
725 	bool found_guilty = false;
726 	struct dma_fence *fence;
727 
728 	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
729 		struct drm_sched_fence *s_fence = s_job->s_fence;
730 
731 		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
732 			found_guilty = true;
733 			guilty_context = s_job->s_fence->scheduled.context;
734 		}
735 
736 		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
737 			dma_fence_set_error(&s_fence->finished, -ECANCELED);
738 
739 		fence = sched->ops->run_job(s_job);
740 
741 		if (IS_ERR_OR_NULL(fence)) {
742 			if (IS_ERR(fence))
743 				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
744 
745 			s_job->s_fence->parent = NULL;
746 		} else {
747 
748 			s_job->s_fence->parent = dma_fence_get(fence);
749 
750 			/* Drop for original kref_init */
751 			dma_fence_put(fence);
752 		}
753 	}
754 }
755 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
756 
757 /**
758  * drm_sched_job_init - init a scheduler job
759  * @job: scheduler job to init
760  * @entity: scheduler entity to use
761  * @credits: the number of credits this job contributes to the schedulers
762  * credit limit
763  * @owner: job owner for debugging
764  * @drm_client_id: &struct drm_file.client_id of the owner (used by trace
765  * events)
766  *
767  * Refer to drm_sched_entity_push_job() documentation
768  * for locking considerations.
769  *
770  * Drivers must make sure to call drm_sched_job_cleanup() if this function returns
771  * successfully, even when @job is aborted before drm_sched_job_arm() is called.
772  *
773  * Note that this function does not assign a valid value to each struct member
774  * of struct drm_sched_job. Take a look at that struct's documentation to see
775  * who sets which struct member with what lifetime.
776  *
777  * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
778  * has died, which can mean that there's no valid runqueue for an @entity.
779  * This function returns -ENOENT in this case (which probably should be -EIO as
780  * a more meaningful return value).
781  *
782  * Returns 0 for success, negative error code otherwise.
783  */
784 int drm_sched_job_init(struct drm_sched_job *job,
785 		       struct drm_sched_entity *entity,
786 		       u32 credits, void *owner,
787 		       uint64_t drm_client_id)
788 {
789 	if (!entity->rq) {
790 		/* This will most likely be followed by missing frames
791 		 * or worse, a blank screen, so leave a trail in the
792 		 * logs so that this can be debugged more easily.
793 		 */
794 		dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__);
795 		return -ENOENT;
796 	}
797 
798 	if (unlikely(!credits)) {
799 		pr_err("*ERROR* %s: credits cannot be 0!\n", __func__);
800 		return -EINVAL;
801 	}
802 
803 	/*
804 	 * We don't know for sure how the user has allocated the job. Thus, zero
805 	 * the struct so that disallowed (i.e., too early) usage of pointers that
806 	 * this function does not set is guaranteed to lead to a NULL pointer
807 	 * exception instead of UB.
808 	 */
809 	memset(job, 0, sizeof(*job));
810 
811 	job->entity = entity;
812 	job->credits = credits;
813 	job->s_fence = drm_sched_fence_alloc(entity, owner, drm_client_id);
814 	if (!job->s_fence)
815 		return -ENOMEM;
816 
817 	INIT_LIST_HEAD(&job->list);
818 
819 	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
820 
821 	return 0;
822 }
823 EXPORT_SYMBOL(drm_sched_job_init);
824 
825 /**
826  * drm_sched_job_arm - arm a scheduler job for execution
827  * @job: scheduler job to arm
828  *
829  * This arms a scheduler job for execution. Specifically it initializes the
830  * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
831  * or other places that need to track the completion of this job. It also
832  * initializes sequence numbers, which are fundamental for fence ordering.
833  *
834  * Refer to drm_sched_entity_push_job() documentation for locking
835  * considerations.
836  *
837  * Once this function has been called, you *must* submit @job with
838  * drm_sched_entity_push_job().
839  *
840  * This can only be called if drm_sched_job_init() succeeded.
841  */
842 void drm_sched_job_arm(struct drm_sched_job *job)
843 {
844 	struct drm_gpu_scheduler *sched;
845 	struct drm_sched_entity *entity = job->entity;
846 
847 	BUG_ON(!entity);
848 	drm_sched_entity_select_rq(entity);
849 	sched = entity->rq->sched;
850 
851 	job->sched = sched;
852 	job->s_priority = entity->priority;
853 
854 	drm_sched_fence_init(job->s_fence, job->entity);
855 }
856 EXPORT_SYMBOL(drm_sched_job_arm);
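
/*
 * Putting drm_sched_job_init() and drm_sched_job_arm() together, a typical
 * submission path looks roughly like the following hypothetical driver code.
 * foo_*, the BO, the credit value of 1 and the already-locked bo->resv are
 * assumptions of this sketch; drm_client_id comes from the submitting
 * &struct drm_file:
 *
 *	ret = drm_sched_job_init(&job->base, &foo_ctx->entity, 1, foo_ctx,
 *				 drm_client_id);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_sched_job_add_resv_dependencies(&job->base, bo->resv,
 *						  DMA_RESV_USAGE_WRITE);
 *	if (ret)
 *		goto err_cleanup;
 *
 *	drm_sched_job_arm(&job->base);
 *	drm_sched_entity_push_job(&job->base);
 *	return 0;
 *
 * err_cleanup:
 *	drm_sched_job_cleanup(&job->base);
 *	return ret;
 */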
857 
858 /**
859  * drm_sched_job_add_dependency - adds the fence as a job dependency
860  * @job: scheduler job to add the dependencies to
861  * @fence: the dma_fence to add to the list of dependencies.
862  *
863  * Note that @fence is consumed in both the success and error cases.
864  *
865  * Returns:
866  * 0 on success, or an error on failing to expand the array.
867  */
868 int drm_sched_job_add_dependency(struct drm_sched_job *job,
869 				 struct dma_fence *fence)
870 {
871 	struct dma_fence *entry;
872 	unsigned long index;
873 	u32 id = 0;
874 	int ret;
875 
876 	if (!fence)
877 		return 0;
878 
879 	/* Deduplicate if we already depend on a fence from the same context.
880 	 * This lets the size of the array of deps scale with the number of
881 	 * engines involved, rather than the number of BOs.
882 	 */
883 	xa_for_each(&job->dependencies, index, entry) {
884 		if (entry->context != fence->context)
885 			continue;
886 
887 		if (dma_fence_is_later(fence, entry)) {
888 			dma_fence_put(entry);
889 			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
890 		} else {
891 			dma_fence_put(fence);
892 		}
893 		return 0;
894 	}
895 
896 	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
897 	if (ret != 0)
898 		dma_fence_put(fence);
899 
900 	return ret;
901 }
902 EXPORT_SYMBOL(drm_sched_job_add_dependency);
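
/*
 * To illustrate the deduplication above with made-up numbers: if the job
 * already depends on a fence from context 5 with seqno 10, and a later fence
 * from the same context (seqno 12) is added, the stored entry is replaced
 * rather than growing the xarray; a fence from a different context gets its
 * own new slot.
 */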
903 
904 /**
905  * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
906  * @job: scheduler job to add the dependencies to
907  * @file: drm file private pointer
908  * @handle: syncobj handle to lookup
909  * @point: timeline point
910  *
911  * This adds the fence matching the given syncobj to @job.
912  *
913  * Returns:
914  * 0 on success, or an error on failing to expand the array.
915  */
916 int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
917 					 struct drm_file *file,
918 					 u32 handle,
919 					 u32 point)
920 {
921 	struct dma_fence *fence;
922 	int ret;
923 
924 	ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
925 	if (ret)
926 		return ret;
927 
928 	return drm_sched_job_add_dependency(job, fence);
929 }
930 EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);
931 
932 /**
933  * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
934  * @job: scheduler job to add the dependencies to
935  * @resv: the dma_resv object to get the fences from
936  * @usage: the dma_resv_usage to use to filter the fences
937  *
938  * This adds all fences matching the given usage from @resv to @job.
939  * Must be called with the @resv lock held.
940  *
941  * Returns:
942  * 0 on success, or an error on failing to expand the array.
943  */
944 int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
945 					struct dma_resv *resv,
946 					enum dma_resv_usage usage)
947 {
948 	struct dma_resv_iter cursor;
949 	struct dma_fence *fence;
950 	int ret;
951 
952 	dma_resv_assert_held(resv);
953 
954 	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
955 		/* Make sure to grab an additional ref on the added fence */
956 		dma_fence_get(fence);
957 		ret = drm_sched_job_add_dependency(job, fence);
958 		if (ret) {
959 			dma_fence_put(fence);
960 			return ret;
961 		}
962 	}
963 	return 0;
964 }
965 EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);
966 
967 /**
968  * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
969  *   dependencies
970  * @job: scheduler job to add the dependencies to
971  * @obj: the gem object to add new dependencies from.
972  * @write: whether the job might write the object (so we need to depend on
973  * shared fences in the reservation object).
974  *
975  * This should be called after drm_gem_lock_reservations() on your array of
976  * GEM objects used in the job but before updating the reservations with your
977  * own fences.
978  *
979  * Returns:
980  * 0 on success, or an error on failing to expand the array.
981  */
982 int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
983 					    struct drm_gem_object *obj,
984 					    bool write)
985 {
986 	return drm_sched_job_add_resv_dependencies(job, obj->resv,
987 						   dma_resv_usage_rw(write));
988 }
989 EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
990 
991 /**
992  * drm_sched_job_has_dependency - check whether fence is the job's dependency
993  * @job: scheduler job to check
994  * @fence: fence to look for
995  *
996  * Returns:
997  * True if @fence is found within the job's dependencies, or otherwise false.
998  */
999 bool drm_sched_job_has_dependency(struct drm_sched_job *job,
1000 				  struct dma_fence *fence)
1001 {
1002 	struct dma_fence *f;
1003 	unsigned long index;
1004 
1005 	xa_for_each(&job->dependencies, index, f) {
1006 		if (f == fence)
1007 			return true;
1008 	}
1009 
1010 	return false;
1011 }
1012 EXPORT_SYMBOL(drm_sched_job_has_dependency);
1013 
1014 /**
1015  * drm_sched_job_cleanup - clean up scheduler job resources
1016  * @job: scheduler job to clean up
1017  *
1018  * Cleans up the resources allocated with drm_sched_job_init().
1019  *
1020  * Drivers should call this from their error unwind code if @job is aborted
1021  * before drm_sched_job_arm() is called.
1022  *
1023  * drm_sched_job_arm() is a point of no return since it initializes the fences
1024  * and their sequence numbers etc. Once that function has been called, you *must*
1025  * submit it with drm_sched_entity_push_job() and cannot simply abort it by
1026  * calling drm_sched_job_cleanup().
1027  *
1028  * This function should be called in the &drm_sched_backend_ops.free_job callback.
1029  */
1030 void drm_sched_job_cleanup(struct drm_sched_job *job)
1031 {
1032 	struct dma_fence *fence;
1033 	unsigned long index;
1034 
1035 	if (kref_read(&job->s_fence->finished.refcount)) {
1036 		/* The job has been processed by the scheduler, i.e.,
1037 		 * drm_sched_job_arm() and drm_sched_entity_push_job() have
1038 		 * been called.
1039 		 */
1040 		dma_fence_put(&job->s_fence->finished);
1041 	} else {
1042 		/* The job was aborted before it has been committed to be run;
1043 		 * notably, drm_sched_job_arm() has not been called.
1044 		 */
1045 		drm_sched_fence_free(job->s_fence);
1046 	}
1047 
1048 	job->s_fence = NULL;
1049 
1050 	xa_for_each(&job->dependencies, index, fence) {
1051 		dma_fence_put(fence);
1052 	}
1053 	xa_destroy(&job->dependencies);
1054 
1055 }
1056 EXPORT_SYMBOL(drm_sched_job_cleanup);
1057 
1058 /**
1059  * drm_sched_wakeup - Wake up the scheduler if it is ready to queue
1060  * @sched: scheduler instance
1061  *
1062  * Wake up the scheduler if we can queue jobs.
1063  */
1064 void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
1065 {
1066 	drm_sched_run_job_queue(sched);
1067 }
1068 
1069 /**
1070  * drm_sched_select_entity - Select next entity to process
1071  *
1072  * @sched: scheduler instance
1073  *
1074  * Return an entity to process or NULL if none are found.
1075  *
1076  * Note that we break out of the for-loop when "entity" is non-NULL, which can
1077  * also be an error pointer; this ensures we don't process lower priority
1078  * run-queues. See the comments in the functions called above.
1079  */
1080 static struct drm_sched_entity *
1081 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
1082 {
1083 	struct drm_sched_entity *entity;
1084 	int i;
1085 
1086 	/* Start with the highest priority.
1087 	 */
1088 	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1089 		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
1090 			drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
1091 			drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
1092 		if (entity)
1093 			break;
1094 	}
1095 
1096 	return IS_ERR(entity) ? NULL : entity;
1097 }
1098 
1099 /**
1100  * drm_sched_get_finished_job - fetch the next finished job to be destroyed
1101  *
1102  * @sched: scheduler instance
1103  *
1104  * Returns the next finished job from the pending list (if there is one),
1105  * ready to be destroyed.
1106  */
1107 static struct drm_sched_job *
1108 drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
1109 {
1110 	struct drm_sched_job *job, *next;
1111 
1112 	spin_lock(&sched->job_list_lock);
1113 
1114 	job = list_first_entry_or_null(&sched->pending_list,
1115 				       struct drm_sched_job, list);
1116 
1117 	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
1118 		/* remove job from pending_list */
1119 		list_del_init(&job->list);
1120 
1121 		/* cancel this job's TO timer */
1122 		cancel_delayed_work(&sched->work_tdr);
1123 		/* make the scheduled timestamp more accurate */
1124 		next = list_first_entry_or_null(&sched->pending_list,
1125 						typeof(*next), list);
1126 
1127 		if (next) {
1128 			if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
1129 				     &next->s_fence->scheduled.flags))
1130 				next->s_fence->scheduled.timestamp =
1131 					dma_fence_timestamp(&job->s_fence->finished);
1132 			/* start TO timer for next job */
1133 			drm_sched_start_timeout(sched);
1134 		}
1135 	} else {
1136 		job = NULL;
1137 	}
1138 
1139 	spin_unlock(&sched->job_list_lock);
1140 
1141 	return job;
1142 }
1143 
1144 /**
1145  * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
1146  * @sched_list: list of drm_gpu_schedulers
1147  * @num_sched_list: number of drm_gpu_schedulers in the sched_list
1148  *
1149  * Returns a pointer to the sched with the least load, or NULL if none of the
1150  * drm_gpu_schedulers are ready.
1151  */
1152 struct drm_gpu_scheduler *
1153 drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
1154 		     unsigned int num_sched_list)
1155 {
1156 	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
1157 	int i;
1158 	unsigned int min_score = UINT_MAX, num_score;
1159 
1160 	for (i = 0; i < num_sched_list; ++i) {
1161 		sched = sched_list[i];
1162 
1163 		if (!sched->ready) {
1164 			DRM_WARN("scheduler %s is not ready, skipping",
1165 				 sched->name);
1166 			continue;
1167 		}
1168 
1169 		num_score = atomic_read(sched->score);
1170 		if (num_score < min_score) {
1171 			min_score = num_score;
1172 			picked_sched = sched;
1173 		}
1174 	}
1175 
1176 	return picked_sched;
1177 }
1178 EXPORT_SYMBOL(drm_sched_pick_best);
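
/*
 * Hypothetical use for load balancing across the schedulers of one ring type;
 * the foo_* names and the scheduler array are assumptions of this sketch:
 *
 *	struct drm_gpu_scheduler *best;
 *
 *	best = drm_sched_pick_best(foo->gfx_scheds, foo->num_gfx_scheds);
 *	if (best)
 *		foo_assign_ring(ctx, best);
 */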
1179 
1180 /**
1181  * drm_sched_free_job_work - worker to call free_job
1182  *
1183  * @w: free job work
1184  */
1185 static void drm_sched_free_job_work(struct work_struct *w)
1186 {
1187 	struct drm_gpu_scheduler *sched =
1188 		container_of(w, struct drm_gpu_scheduler, work_free_job);
1189 	struct drm_sched_job *job;
1190 
1191 	job = drm_sched_get_finished_job(sched);
1192 	if (job)
1193 		sched->ops->free_job(job);
1194 
1195 	drm_sched_run_free_queue(sched);
1196 	drm_sched_run_job_queue(sched);
1197 }
1198 
1199 /**
1200  * drm_sched_run_job_work - worker to call run_job
1201  *
1202  * @w: run job work
1203  */
1204 static void drm_sched_run_job_work(struct work_struct *w)
1205 {
1206 	struct drm_gpu_scheduler *sched =
1207 		container_of(w, struct drm_gpu_scheduler, work_run_job);
1208 	struct drm_sched_entity *entity;
1209 	struct dma_fence *fence;
1210 	struct drm_sched_fence *s_fence;
1211 	struct drm_sched_job *sched_job;
1212 	int r;
1213 
1214 	/* Find entity with a ready job */
1215 	entity = drm_sched_select_entity(sched);
1216 	if (!entity)
1217 		return;	/* No more work */
1218 
1219 	sched_job = drm_sched_entity_pop_job(entity);
1220 	if (!sched_job) {
1221 		complete_all(&entity->entity_idle);
1222 		drm_sched_run_job_queue(sched);
1223 		return;
1224 	}
1225 
1226 	s_fence = sched_job->s_fence;
1227 
1228 	atomic_add(sched_job->credits, &sched->credit_count);
1229 	drm_sched_job_begin(sched_job);
1230 
1231 	trace_drm_sched_job_run(sched_job, entity);
1232 	/*
1233 	 * The run_job() callback must by definition return a fence whose
1234 	 * refcount has been incremented for the scheduler already.
1235 	 */
1236 	fence = sched->ops->run_job(sched_job);
1237 	complete_all(&entity->entity_idle);
1238 	drm_sched_fence_scheduled(s_fence, fence);
1239 
1240 	if (!IS_ERR_OR_NULL(fence)) {
1241 		r = dma_fence_add_callback(fence, &sched_job->cb,
1242 					   drm_sched_job_done_cb);
1243 		if (r == -ENOENT)
1244 			drm_sched_job_done(sched_job, fence->error);
1245 		else if (r)
1246 			DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
1247 
1248 		dma_fence_put(fence);
1249 	} else {
1250 		drm_sched_job_done(sched_job, IS_ERR(fence) ?
1251 				   PTR_ERR(fence) : 0);
1252 	}
1253 
1254 	wake_up(&sched->job_scheduled);
1255 	drm_sched_run_job_queue(sched);
1256 }
1257 
1258 static struct workqueue_struct *drm_sched_alloc_wq(const char *name)
1259 {
1260 #if (IS_ENABLED(CONFIG_LOCKDEP))
1261 	static struct lockdep_map map = {
1262 		.name = "drm_sched_lockdep_map"
1263 	};
1264 
1265 	/*
1266 	 * Avoid leaking a lockdep map on each drm sched creation and
1267 	 * destruction by using a single lockdep map for all drm sched
1268 	 * allocated submit_wq.
1269 	 */
1270 
1271 	return alloc_ordered_workqueue_lockdep_map(name, WQ_MEM_RECLAIM, &map);
1272 #else
1273 	return alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
1274 #endif
1275 }
1276 
1277 /**
1278  * drm_sched_init - Init a gpu scheduler instance
1279  *
1280  * @sched: scheduler instance
1281  * @args: scheduler initialization arguments
1282  *
1283  * Return 0 on success, otherwise error code.
1284  */
1285 int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_args *args)
1286 {
1287 	int i;
1288 
1289 	sched->ops = args->ops;
1290 	sched->credit_limit = args->credit_limit;
1291 	sched->name = args->name;
1292 	sched->timeout = args->timeout;
1293 	sched->hang_limit = args->hang_limit;
1294 	sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_wq;
1295 	sched->score = args->score ? args->score : &sched->_score;
1296 	sched->dev = args->dev;
1297 
1298 	if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) {
1299 		/* This is a gross violation--tell drivers what the problem is.
1300 		 */
1301 		dev_err(sched->dev, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
1302 			__func__);
1303 		return -EINVAL;
1304 	} else if (sched->sched_rq) {
1305 		/* Not an error, but warn anyway so drivers can
1306 		 * fine-tune their DRM calling order, and return because all
1307 		 * is good.
1308 		 */
1309 		dev_warn(sched->dev, "%s: scheduler already initialized!\n", __func__);
1310 		return 0;
1311 	}
1312 
1313 	if (args->submit_wq) {
1314 		sched->submit_wq = args->submit_wq;
1315 		sched->own_submit_wq = false;
1316 	} else {
1317 		sched->submit_wq = drm_sched_alloc_wq(args->name);
1318 		if (!sched->submit_wq)
1319 			return -ENOMEM;
1320 
1321 		sched->own_submit_wq = true;
1322 	}
1323 
1324 	sched->sched_rq = kmalloc_array(args->num_rqs, sizeof(*sched->sched_rq),
1325 					GFP_KERNEL | __GFP_ZERO);
1326 	if (!sched->sched_rq)
1327 		goto Out_check_own;
1328 	sched->num_rqs = args->num_rqs;
1329 	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1330 		sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
1331 		if (!sched->sched_rq[i])
1332 			goto Out_unroll;
1333 		drm_sched_rq_init(sched, sched->sched_rq[i]);
1334 	}
1335 
1336 	init_waitqueue_head(&sched->job_scheduled);
1337 	INIT_LIST_HEAD(&sched->pending_list);
1338 	spin_lock_init(&sched->job_list_lock);
1339 	atomic_set(&sched->credit_count, 0);
1340 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
1341 	INIT_WORK(&sched->work_run_job, drm_sched_run_job_work);
1342 	INIT_WORK(&sched->work_free_job, drm_sched_free_job_work);
1343 	atomic_set(&sched->_score, 0);
1344 	atomic64_set(&sched->job_id_count, 0);
1345 	sched->pause_submit = false;
1346 
1347 	sched->ready = true;
1348 	return 0;
1349 Out_unroll:
1350 	for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
1351 		kfree(sched->sched_rq[i]);
1352 
1353 	kfree(sched->sched_rq);
1354 	sched->sched_rq = NULL;
1355 Out_check_own:
1356 	if (sched->own_submit_wq)
1357 		destroy_workqueue(sched->submit_wq);
1358 	dev_err(sched->dev, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
1359 	return -ENOMEM;
1360 }
1361 EXPORT_SYMBOL(drm_sched_init);
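
/*
 * A minimal initialization sketch; the foo_* names are hypothetical driver
 * code and the concrete values are only examples. Fields left out keep their
 * defaults, e.g. a NULL submit_wq makes the scheduler allocate its own
 * ordered workqueue and a NULL timeout_wq falls back to system_wq:
 *
 *	const struct drm_sched_init_args args = {
 *		.ops		= &foo_sched_ops,
 *		.num_rqs	= DRM_SCHED_PRIORITY_COUNT,
 *		.credit_limit	= 32,
 *		.hang_limit	= 1,
 *		.timeout	= msecs_to_jiffies(500),
 *		.name		= "foo-ring0",
 *		.dev		= foo->dev,
 *	};
 *	int ret;
 *
 *	ret = drm_sched_init(&foo->sched, &args);
 *	if (ret)
 *		return ret;
 */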
1362 
1363 /**
1364  * drm_sched_fini - Destroy a gpu scheduler
1365  *
1366  * @sched: scheduler instance
1367  *
1368  * Tears down and cleans up the scheduler.
1369  *
1370  * This stops submission of new jobs to the hardware through
1371  * drm_sched_backend_ops.run_job(). Consequently, drm_sched_backend_ops.free_job()
1372  * will not be called for all jobs still in drm_gpu_scheduler.pending_list.
1373  * There is no solution for this currently. Thus, it is up to the driver to make
1374  * sure that:
1375  *
1376  *  a) drm_sched_fini() is only called after drm_sched_backend_ops.free_job()
1377  *     has been called for all submitted jobs, or that
1378  *  b) the jobs for which drm_sched_backend_ops.free_job() has not been called
1379  *     after drm_sched_fini() ran are freed manually.
1380  *
1381  * FIXME: Take care of the above problem and prevent this function from leaking
1382  * the jobs in drm_gpu_scheduler.pending_list under any circumstances.
1383  */
1384 void drm_sched_fini(struct drm_gpu_scheduler *sched)
1385 {
1386 	struct drm_sched_entity *s_entity;
1387 	int i;
1388 
1389 	drm_sched_wqueue_stop(sched);
1390 
1391 	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1392 		struct drm_sched_rq *rq = sched->sched_rq[i];
1393 
1394 		spin_lock(&rq->lock);
1395 		list_for_each_entry(s_entity, &rq->entities, list)
1396 			/*
1397 			 * Prevents reinsertion and marks the job_queue as idle;
1398 			 * the entity will eventually be removed from the rq in
1399 			 * drm_sched_entity_fini().
1400 			 */
1401 			s_entity->stopped = true;
1402 		spin_unlock(&rq->lock);
1403 		kfree(sched->sched_rq[i]);
1404 	}
1405 
1406 	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
1407 	wake_up_all(&sched->job_scheduled);
1408 
1409 	/* Confirm no work left behind accessing device structures */
1410 	cancel_delayed_work_sync(&sched->work_tdr);
1411 
1412 	if (sched->own_submit_wq)
1413 		destroy_workqueue(sched->submit_wq);
1414 	sched->ready = false;
1415 	kfree(sched->sched_rq);
1416 	sched->sched_rq = NULL;
1417 }
1418 EXPORT_SYMBOL(drm_sched_fini);
1419 
1420 /**
1421  * drm_sched_increase_karma - Update sched_entity guilty flag
1422  *
1423  * @bad: The job guilty of time out
1424  *
1425  * Incremented on every hang caused by the 'bad' job. If this exceeds the hang
1426  * limit of the scheduler, the respective sched entity is marked guilty and
1427  * jobs from it will not be scheduled further.
1428  */
1429 void drm_sched_increase_karma(struct drm_sched_job *bad)
1430 {
1431 	int i;
1432 	struct drm_sched_entity *tmp;
1433 	struct drm_sched_entity *entity;
1434 	struct drm_gpu_scheduler *sched = bad->sched;
1435 
1436 	/* Don't change @bad's karma if it's from the KERNEL RQ, because sometimes
1437 	 * a GPU hang would corrupt kernel jobs (like VM updating jobs); keep in
1438 	 * mind that kernel jobs are always considered good.
1439 	 */
1440 	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
1441 		atomic_inc(&bad->karma);
1442 
1443 		for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
1444 			struct drm_sched_rq *rq = sched->sched_rq[i];
1445 
1446 			spin_lock(&rq->lock);
1447 			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
1448 				if (bad->s_fence->scheduled.context ==
1449 				    entity->fence_context) {
1450 					if (entity->guilty)
1451 						atomic_set(entity->guilty, 1);
1452 					break;
1453 				}
1454 			}
1455 			spin_unlock(&rq->lock);
1456 			if (&entity->list != &rq->entities)
1457 				break;
1458 		}
1459 	}
1460 }
1461 EXPORT_SYMBOL(drm_sched_increase_karma);
1462 
1463 /**
1464  * drm_sched_wqueue_ready - Is the scheduler ready for submission
1465  *
1466  * @sched: scheduler instance
1467  *
1468  * Returns true if submission is ready
1469  */
1470 bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched)
1471 {
1472 	return sched->ready;
1473 }
1474 EXPORT_SYMBOL(drm_sched_wqueue_ready);
1475 
1476 /**
1477  * drm_sched_wqueue_stop - stop scheduler submission
1478  * @sched: scheduler instance
1479  *
1480  * Stops the scheduler from pulling new jobs from entities. It also stops
1481  * freeing jobs automatically through drm_sched_backend_ops.free_job().
1482  */
1483 void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
1484 {
1485 	WRITE_ONCE(sched->pause_submit, true);
1486 	cancel_work_sync(&sched->work_run_job);
1487 	cancel_work_sync(&sched->work_free_job);
1488 }
1489 EXPORT_SYMBOL(drm_sched_wqueue_stop);
1490 
1491 /**
1492  * drm_sched_wqueue_start - start scheduler submission
1493  * @sched: scheduler instance
1494  *
1495  * Restarts the scheduler after drm_sched_wqueue_stop() has stopped it.
1496  *
1497  * This function is not necessary for 'conventional' startup. The scheduler is
1498  * fully operational after drm_sched_init() succeeded.
1499  */
1500 void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
1501 {
1502 	WRITE_ONCE(sched->pause_submit, false);
1503 	queue_work(sched->submit_wq, &sched->work_run_job);
1504 	queue_work(sched->submit_wq, &sched->work_free_job);
1505 }
1506 EXPORT_SYMBOL(drm_sched_wqueue_start);
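
/*
 * Illustrative pairing of drm_sched_wqueue_stop() and drm_sched_wqueue_start()
 * around an operation during which no scheduler work may run; foo_* is
 * hypothetical driver code:
 *
 *	drm_sched_wqueue_stop(&foo->sched);
 *	foo_device_reset(foo);
 *	drm_sched_wqueue_start(&foo->sched);
 */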
1507