xref: /linux/drivers/gpu/drm/scheduler/sched_main.c (revision a4871e6201c46c8e1d04308265b4b4c5753c8209)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 /**
25  * DOC: Overview
26  *
27  * The GPU scheduler provides entities which allow userspace to push jobs
28  * into software queues which are then scheduled on a hardware run queue.
29  * The software queues have priorities among them. The scheduler selects entities
30  * from the run queue using a FIFO. The scheduler provides dependency handling
31  * features among jobs. The driver is supposed to provide callback functions for
32  * backend operations to the scheduler, such as submitting a job to the hardware
33  * run queue, returning the dependencies of a job, etc.
34  *
35  * The organisation of the scheduler is the following:
36  *
37  * 1. Each hw run queue has one scheduler
38  * 2. Each scheduler has multiple run queues with different priorities
39  *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
40  * 3. Each scheduler run queue has a queue of entities to schedule
41  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
42  *    the hardware.
43  *
44  * The jobs in an entity are always scheduled in the order in which they were pushed.
45  *
46  * Note that once a job has been taken from the entity's queue and pushed to the
47  * hardware, i.e. the pending queue, the entity must not be referenced anymore
48  * through the job's entity pointer.
49  */
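
/*
 * A minimal sketch of the backend operations a driver typically provides.
 * The foo_* names and types below are hypothetical placeholders, not part of
 * any real driver:
 *
 *	static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
 *	{
 *		struct foo_job *job = to_foo_job(sched_job);
 *
 *		// Submit to the hardware run queue and return the hardware
 *		// fence, with a reference held for the scheduler.
 *		return foo_hw_submit(job);
 *	}
 *
 *	static enum drm_gpu_sched_stat foo_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		// Reset recovery, see drm_sched_stop()/drm_sched_start().
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 *
 *	static void foo_free_job(struct drm_sched_job *sched_job)
 *	{
 *		drm_sched_job_cleanup(sched_job);
 *		kfree(to_foo_job(sched_job));
 *	}
 *
 *	static const struct drm_sched_backend_ops foo_sched_ops = {
 *		.run_job	= foo_run_job,
 *		.timedout_job	= foo_timedout_job,
 *		.free_job	= foo_free_job,
 *	};
 */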
50 
51 /**
52  * DOC: Flow Control
53  *
54  * The DRM GPU scheduler provides a flow control mechanism to regulate the rate
55  * at which the jobs fetched from scheduler entities are executed.
56  *
57  * In this context the &drm_gpu_scheduler keeps track of a driver specified
58  * credit limit representing the capacity of this scheduler and a credit count;
59  * every &drm_sched_job carries a driver specified number of credits.
60  *
61  * Once a job is executed (but not yet finished), the job's credits contribute
62  * to the scheduler's credit count until the job is finished. If by executing
63  * one more job the scheduler's credit count would exceed the scheduler's
64  * credit limit, the job won't be executed. Instead, the scheduler will wait
65  * until the credit count has decreased enough to not overflow its credit limit.
66  * This implies waiting for previously executed jobs.
67  */
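
/*
 * A worked example with hypothetical numbers: with a credit limit of 8 and a
 * current credit count of 6, a job worth 2 credits can still be picked, while
 * a job worth 4 credits has to wait until previously executed jobs finish and
 * return their credits. A driver sizes a job when initializing it, e.g.
 * (foo_job_required_ring_slots() is a hypothetical helper):
 *
 *	u32 credits = foo_job_required_ring_slots(job);
 *
 *	ret = drm_sched_job_init(&job->base, &ctx->entity, credits, file_priv);
 */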
68 
69 #include <linux/wait.h>
70 #include <linux/sched.h>
71 #include <linux/completion.h>
72 #include <linux/dma-resv.h>
73 #include <uapi/linux/sched/types.h>
74 
75 #include <drm/drm_print.h>
76 #include <drm/drm_gem.h>
77 #include <drm/drm_syncobj.h>
78 #include <drm/gpu_scheduler.h>
79 #include <drm/spsc_queue.h>
80 
81 #include "sched_internal.h"
82 
83 #define CREATE_TRACE_POINTS
84 #include "gpu_scheduler_trace.h"
85 
86 #ifdef CONFIG_LOCKDEP
87 static struct lockdep_map drm_sched_lockdep_map = {
88 	.name = "drm_sched_lockdep_map"
89 };
90 #endif
91 
92 int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
93 
94 /**
95  * DOC: sched_policy (int)
96  * Used to override the default scheduling policy for entities in a run queue.
97  */
98 MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
99 module_param_named(sched_policy, drm_sched_policy, int, 0444);
100 
101 static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
102 {
103 	u32 credits;
104 
105 	WARN_ON(check_sub_overflow(sched->credit_limit,
106 				   atomic_read(&sched->credit_count),
107 				   &credits));
108 
109 	return credits;
110 }
111 
112 /**
113  * drm_sched_can_queue -- Can we queue more to the hardware?
114  * @sched: scheduler instance
115  * @entity: the scheduler entity
116  *
117  * Return true if we can push at least one more job from @entity, false
118  * otherwise.
119  */
120 static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
121 				struct drm_sched_entity *entity)
122 {
123 	struct drm_sched_job *s_job;
124 
125 	s_job = drm_sched_entity_queue_peek(entity);
126 	if (!s_job)
127 		return false;
128 
129 	/* If a job exceeds the credit limit, truncate it to the credit limit
130 	 * itself to guarantee forward progress.
131 	 */
132 	if (s_job->credits > sched->credit_limit) {
133 		dev_WARN(sched->dev,
134 			 "Jobs may not exceed the credit limit, truncate.\n");
135 		s_job->credits = sched->credit_limit;
136 	}
137 
138 	return drm_sched_available_credits(sched) >= s_job->credits;
139 }
140 
141 static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
142 							    const struct rb_node *b)
143 {
144 	struct drm_sched_entity *ent_a =  rb_entry((a), struct drm_sched_entity, rb_tree_node);
145 	struct drm_sched_entity *ent_b =  rb_entry((b), struct drm_sched_entity, rb_tree_node);
146 
147 	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
148 }
149 
150 static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
151 					    struct drm_sched_rq *rq)
152 {
153 	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
154 		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
155 		RB_CLEAR_NODE(&entity->rb_tree_node);
156 	}
157 }
158 
159 void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
160 				     struct drm_sched_rq *rq,
161 				     ktime_t ts)
162 {
163 	/*
164 	 * Both locks need to be grabbed, one to protect from entity->rq change
165 	 * for entity from within concurrent drm_sched_entity_select_rq and the
166 	 * other to update the rb tree structure.
167 	 */
168 	lockdep_assert_held(&entity->lock);
169 	lockdep_assert_held(&rq->lock);
170 
171 	drm_sched_rq_remove_fifo_locked(entity, rq);
172 
173 	entity->oldest_job_waiting = ts;
174 
175 	rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root,
176 		      drm_sched_entity_compare_before);
177 }
178 
179 /**
180  * drm_sched_rq_init - initialize a given run queue struct
181  *
182  * @sched: scheduler instance to associate with this run queue
183  * @rq: scheduler run queue
184  *
185  * Initializes a scheduler runqueue.
186  */
187 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
188 			      struct drm_sched_rq *rq)
189 {
190 	spin_lock_init(&rq->lock);
191 	INIT_LIST_HEAD(&rq->entities);
192 	rq->rb_tree_root = RB_ROOT_CACHED;
193 	rq->current_entity = NULL;
194 	rq->sched = sched;
195 }
196 
197 /**
198  * drm_sched_rq_add_entity - add an entity
199  *
200  * @rq: scheduler run queue
201  * @entity: scheduler entity
202  *
203  * Adds a scheduler entity to the run queue.
204  */
205 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
206 			     struct drm_sched_entity *entity)
207 {
208 	lockdep_assert_held(&entity->lock);
209 	lockdep_assert_held(&rq->lock);
210 
211 	if (!list_empty(&entity->list))
212 		return;
213 
214 	atomic_inc(rq->sched->score);
215 	list_add_tail(&entity->list, &rq->entities);
216 }
217 
218 /**
219  * drm_sched_rq_remove_entity - remove an entity
220  *
221  * @rq: scheduler run queue
222  * @entity: scheduler entity
223  *
224  * Removes a scheduler entity from the run queue.
225  */
226 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
227 				struct drm_sched_entity *entity)
228 {
229 	lockdep_assert_held(&entity->lock);
230 
231 	if (list_empty(&entity->list))
232 		return;
233 
234 	spin_lock(&rq->lock);
235 
236 	atomic_dec(rq->sched->score);
237 	list_del_init(&entity->list);
238 
239 	if (rq->current_entity == entity)
240 		rq->current_entity = NULL;
241 
242 	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
243 		drm_sched_rq_remove_fifo_locked(entity, rq);
244 
245 	spin_unlock(&rq->lock);
246 }
247 
248 /**
249  * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
250  *
251  * @sched: the gpu scheduler
252  * @rq: scheduler run queue to check.
253  *
254  * Try to find the next ready entity.
255  *
256  * Return an entity if one is found; return an error-pointer (!NULL) if an
257  * entity was ready, but the scheduler had insufficient credits to accommodate
258  * its job; return NULL if no ready entity was found.
259  */
260 static struct drm_sched_entity *
261 drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
262 			      struct drm_sched_rq *rq)
263 {
264 	struct drm_sched_entity *entity;
265 
266 	spin_lock(&rq->lock);
267 
268 	entity = rq->current_entity;
269 	if (entity) {
270 		list_for_each_entry_continue(entity, &rq->entities, list) {
271 			if (drm_sched_entity_is_ready(entity)) {
272 				/* If we can't queue yet, preserve the current
273 				 * entity in terms of fairness.
274 				 */
275 				if (!drm_sched_can_queue(sched, entity)) {
276 					spin_unlock(&rq->lock);
277 					return ERR_PTR(-ENOSPC);
278 				}
279 
280 				rq->current_entity = entity;
281 				reinit_completion(&entity->entity_idle);
282 				spin_unlock(&rq->lock);
283 				return entity;
284 			}
285 		}
286 	}
287 
288 	list_for_each_entry(entity, &rq->entities, list) {
289 		if (drm_sched_entity_is_ready(entity)) {
290 			/* If we can't queue yet, preserve the current entity in
291 			 * terms of fairness.
292 			 */
293 			if (!drm_sched_can_queue(sched, entity)) {
294 				spin_unlock(&rq->lock);
295 				return ERR_PTR(-ENOSPC);
296 			}
297 
298 			rq->current_entity = entity;
299 			reinit_completion(&entity->entity_idle);
300 			spin_unlock(&rq->lock);
301 			return entity;
302 		}
303 
304 		if (entity == rq->current_entity)
305 			break;
306 	}
307 
308 	spin_unlock(&rq->lock);
309 
310 	return NULL;
311 }
312 
313 /**
314  * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
315  *
316  * @sched: the gpu scheduler
317  * @rq: scheduler run queue to check.
318  *
319  * Find oldest waiting ready entity.
320  *
321  * Return an entity if one is found; return an error-pointer (!NULL) if an
322  * entity was ready, but the scheduler had insufficient credits to accommodate
323  * its job; return NULL if no ready entity was found.
324  */
325 static struct drm_sched_entity *
326 drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched,
327 				struct drm_sched_rq *rq)
328 {
329 	struct rb_node *rb;
330 
331 	spin_lock(&rq->lock);
332 	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
333 		struct drm_sched_entity *entity;
334 
335 		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
336 		if (drm_sched_entity_is_ready(entity)) {
337 			/* If we can't queue yet, preserve the current entity in
338 			 * terms of fairness.
339 			 */
340 			if (!drm_sched_can_queue(sched, entity)) {
341 				spin_unlock(&rq->lock);
342 				return ERR_PTR(-ENOSPC);
343 			}
344 
345 			reinit_completion(&entity->entity_idle);
346 			break;
347 		}
348 	}
349 	spin_unlock(&rq->lock);
350 
351 	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
352 }
353 
354 /**
355  * drm_sched_run_job_queue - enqueue run-job work
356  * @sched: scheduler instance
357  */
358 static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
359 {
360 	if (!READ_ONCE(sched->pause_submit))
361 		queue_work(sched->submit_wq, &sched->work_run_job);
362 }
363 
364 /**
365  * __drm_sched_run_free_queue - enqueue free-job work
366  * @sched: scheduler instance
367  */
368 static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
369 {
370 	if (!READ_ONCE(sched->pause_submit))
371 		queue_work(sched->submit_wq, &sched->work_free_job);
372 }
373 
374 /**
375  * drm_sched_run_free_queue - enqueue free-job work if ready
376  * @sched: scheduler instance
377  */
378 static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
379 {
380 	struct drm_sched_job *job;
381 
382 	spin_lock(&sched->job_list_lock);
383 	job = list_first_entry_or_null(&sched->pending_list,
384 				       struct drm_sched_job, list);
385 	if (job && dma_fence_is_signaled(&job->s_fence->finished))
386 		__drm_sched_run_free_queue(sched);
387 	spin_unlock(&sched->job_list_lock);
388 }
389 
390 /**
391  * drm_sched_job_done - complete a job
392  * @s_job: pointer to the job which is done
393  *
394  * Finish the job's fence and resubmit the work items.
395  */
396 static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
397 {
398 	struct drm_sched_fence *s_fence = s_job->s_fence;
399 	struct drm_gpu_scheduler *sched = s_fence->sched;
400 
401 	atomic_sub(s_job->credits, &sched->credit_count);
402 	atomic_dec(sched->score);
403 
404 	trace_drm_sched_process_job(s_fence);
405 
406 	dma_fence_get(&s_fence->finished);
407 	drm_sched_fence_finished(s_fence, result);
408 	dma_fence_put(&s_fence->finished);
409 	__drm_sched_run_free_queue(sched);
410 }
411 
412 /**
413  * drm_sched_job_done_cb - the callback for a done job
414  * @f: fence
415  * @cb: fence callbacks
416  */
417 static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
418 {
419 	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
420 
421 	drm_sched_job_done(s_job, f->error);
422 }
423 
424 /**
425  * drm_sched_start_timeout - start timeout for reset worker
426  *
427  * @sched: scheduler instance to start the worker for
428  *
429  * Start the timeout for the given scheduler.
430  */
431 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
432 {
433 	lockdep_assert_held(&sched->job_list_lock);
434 
435 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
436 	    !list_empty(&sched->pending_list))
437 		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
438 }
439 
440 static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched)
441 {
442 	spin_lock(&sched->job_list_lock);
443 	drm_sched_start_timeout(sched);
444 	spin_unlock(&sched->job_list_lock);
445 }
446 
447 /**
448  * drm_sched_tdr_queue_imm - immediately start job timeout handler
449  *
450  * @sched: scheduler for which the timeout handling should be started.
451  *
452  * Start timeout handling immediately for the named scheduler.
453  */
454 void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched)
455 {
456 	spin_lock(&sched->job_list_lock);
457 	sched->timeout = 0;
458 	drm_sched_start_timeout(sched);
459 	spin_unlock(&sched->job_list_lock);
460 }
461 EXPORT_SYMBOL(drm_sched_tdr_queue_imm);
462 
463 /**
464  * drm_sched_fault - immediately start timeout handler
465  *
466  * @sched: scheduler where the timeout handling should be started.
467  *
468  * Start timeout handling immediately when the driver detects a hardware fault.
469  */
470 void drm_sched_fault(struct drm_gpu_scheduler *sched)
471 {
472 	if (sched->timeout_wq)
473 		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
474 }
475 EXPORT_SYMBOL(drm_sched_fault);
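
/*
 * A sketch of how a driver might use this from its fault interrupt handler
 * (the foo_* names are hypothetical):
 *
 *	static irqreturn_t foo_fault_irq(int irq, void *arg)
 *	{
 *		struct foo_device *fdev = arg;
 *
 *		// Hand the hang over to the timeout handler right away.
 *		drm_sched_fault(&fdev->sched);
 *		return IRQ_HANDLED;
 *	}
 */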
476 
477 /**
478  * drm_sched_suspend_timeout - Suspend scheduler job timeout
479  *
480  * @sched: scheduler instance for which to suspend the timeout
481  *
482  * Suspend the delayed work timeout for the scheduler. This is done by
483  * modifying the delayed work timeout to an arbitrary large value,
484  * MAX_SCHEDULE_TIMEOUT in this case.
485  *
486  * Returns the timeout remaining
487  *
488  */
489 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
490 {
491 	unsigned long sched_timeout, now = jiffies;
492 
493 	sched_timeout = sched->work_tdr.timer.expires;
494 
495 	/*
496 	 * Modify the timeout to an arbitrarily large value. This also prevents
497 	 * the timeout to be restarted when new submissions arrive
498 	 */
499 	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
500 			&& time_after(sched_timeout, now))
501 		return sched_timeout - now;
502 	else
503 		return sched->timeout;
504 }
505 EXPORT_SYMBOL(drm_sched_suspend_timeout);
506 
507 /**
508  * drm_sched_resume_timeout - Resume scheduler job timeout
509  *
510  * @sched: scheduler instance for which to resume the timeout
511  * @remaining: remaining timeout
512  *
513  * Resume the delayed work timeout for the scheduler.
514  */
515 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
516 		unsigned long remaining)
517 {
518 	spin_lock(&sched->job_list_lock);
519 
520 	if (list_empty(&sched->pending_list))
521 		cancel_delayed_work(&sched->work_tdr);
522 	else
523 		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);
524 
525 	spin_unlock(&sched->job_list_lock);
526 }
527 EXPORT_SYMBOL(drm_sched_resume_timeout);
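
/*
 * These two calls are meant to be paired. A sketch, assuming a hypothetical
 * driver operation during which the hardware legitimately makes no progress:
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(&fdev->sched);
 *	foo_hw_do_slow_maintenance(fdev);
 *	drm_sched_resume_timeout(&fdev->sched, remaining);
 */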
528 
529 static void drm_sched_job_begin(struct drm_sched_job *s_job)
530 {
531 	struct drm_gpu_scheduler *sched = s_job->sched;
532 
533 	spin_lock(&sched->job_list_lock);
534 	list_add_tail(&s_job->list, &sched->pending_list);
535 	drm_sched_start_timeout(sched);
536 	spin_unlock(&sched->job_list_lock);
537 }
538 
539 static void drm_sched_job_timedout(struct work_struct *work)
540 {
541 	struct drm_gpu_scheduler *sched;
542 	struct drm_sched_job *job;
543 	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;
544 
545 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
546 
547 	/* Protects against concurrent deletion in drm_sched_get_finished_job */
548 	spin_lock(&sched->job_list_lock);
549 	job = list_first_entry_or_null(&sched->pending_list,
550 				       struct drm_sched_job, list);
551 
552 	if (job) {
553 		/*
554 		 * Remove the bad job so it cannot be freed by a concurrent
555 		 * &struct drm_sched_backend_ops.free_job. It will be
556 		 * reinserted after the scheduler's work items have been
557 		 * cancelled, at which point it's safe.
558 		 */
559 		list_del_init(&job->list);
560 		spin_unlock(&sched->job_list_lock);
561 
562 		status = job->sched->ops->timedout_job(job);
563 
564 		/*
565 		 * Guilty job did complete and hence needs to be manually removed.
566 		 * See drm_sched_stop doc.
567 		 */
568 		if (sched->free_guilty) {
569 			job->sched->ops->free_job(job);
570 			sched->free_guilty = false;
571 		}
572 	} else {
573 		spin_unlock(&sched->job_list_lock);
574 	}
575 
576 	if (status != DRM_GPU_SCHED_STAT_ENODEV)
577 		drm_sched_start_timeout_unlocked(sched);
578 }
579 
580 /**
581  * drm_sched_stop - stop the scheduler
582  *
583  * @sched: scheduler instance
584  * @bad: job which caused the time out
585  *
586  * Stops the scheduler and also removes and frees all completed jobs.
587  * Note: the bad job will not be freed as it might be used later, so it is the
588  * caller's responsibility to release it manually if it is not part of the
589  * pending list any more.
590  *
591  * This function is typically used for reset recovery (see the documentation of
592  * drm_sched_backend_ops.timedout_job() for details). Do not call it for
593  * scheduler teardown, i.e., before calling drm_sched_fini().
594  */
595 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
596 {
597 	struct drm_sched_job *s_job, *tmp;
598 
599 	drm_sched_wqueue_stop(sched);
600 
601 	/*
602 	 * Reinsert back the bad job here - now it's safe as
603 	 * drm_sched_get_finished_job() cannot race against us and release the
604 	 * bad job at this point - we parked (waited for) any in progress
605 	 * (earlier) cleanups and drm_sched_get_finished_job() will not be
606 	 * called now until the scheduler's work items are submitted again.
607 	 */
608 	if (bad && bad->sched == sched)
609 		/*
610 		 * Add at the head of the queue to reflect it was the earliest
611 		 * job extracted.
612 		 */
613 		list_add(&bad->list, &sched->pending_list);
614 
615 	/*
616 	 * Iterate the job list from later to earlier and either deactivate
617 	 * their HW callbacks or remove them from the pending list if they have
618 	 * already signaled.
619 	 * This iteration is thread safe as the scheduler's work items have been
620 	 * cancelled.
621 	 */
622 	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
623 					 list) {
624 		if (s_job->s_fence->parent &&
625 		    dma_fence_remove_callback(s_job->s_fence->parent,
626 					      &s_job->cb)) {
627 			dma_fence_put(s_job->s_fence->parent);
628 			s_job->s_fence->parent = NULL;
629 			atomic_sub(s_job->credits, &sched->credit_count);
630 		} else {
631 			/*
632 			 * remove job from pending_list.
633 			 * Locking here is for concurrent resume timeout
634 			 */
635 			spin_lock(&sched->job_list_lock);
636 			list_del_init(&s_job->list);
637 			spin_unlock(&sched->job_list_lock);
638 
639 			/*
640 			 * Wait for job's HW fence callback to finish using s_job
641 			 * before releasing it.
642 			 *
643 			 * Job is still alive so fence refcount at least 1
644 			 */
645 			dma_fence_wait(&s_job->s_fence->finished, false);
646 
647 			/*
648 			 * We must keep bad job alive for later use during
649 			 * recovery by some of the drivers but leave a hint
650 			 * that the guilty job must be released.
651 			 */
652 			if (bad != s_job)
653 				sched->ops->free_job(s_job);
654 			else
655 				sched->free_guilty = true;
656 		}
657 	}
658 
659 	/*
660 	 * Stop the pending timer in flight as we rearm it in drm_sched_start. This
661 	 * prevents the pending timeout work in progress from firing right away after
662 	 * this TDR finished and before the newly restarted jobs have had a
663 	 * chance to complete.
664 	 */
665 	cancel_delayed_work(&sched->work_tdr);
666 }
667 EXPORT_SYMBOL(drm_sched_stop);
668 
669 /**
670  * drm_sched_start - recover jobs after a reset
671  *
672  * @sched: scheduler instance
673  * @errno: error to set on the pending fences
674  *
675  * This function is typically used for reset recovery (see the documentation of
676  * drm_sched_backend_ops.timedout_job() for details). Do not call it for
677  * scheduler startup. The scheduler itself is fully operational after
678  * drm_sched_init() succeeded.
679  */
680 void drm_sched_start(struct drm_gpu_scheduler *sched, int errno)
681 {
682 	struct drm_sched_job *s_job, *tmp;
683 
684 	/*
685 	 * Locking the list is not required here as the scheduler's work items
686 	 * are currently not running, so no new jobs are being inserted or
687 	 * removed. Also concurrent GPU recovers can't run in parallel.
688 	 */
689 	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
690 		struct dma_fence *fence = s_job->s_fence->parent;
691 
692 		atomic_add(s_job->credits, &sched->credit_count);
693 
694 		if (!fence) {
695 			drm_sched_job_done(s_job, errno ?: -ECANCELED);
696 			continue;
697 		}
698 
699 		if (dma_fence_add_callback(fence, &s_job->cb,
700 					   drm_sched_job_done_cb))
701 			drm_sched_job_done(s_job, fence->error ?: errno);
702 	}
703 
704 	drm_sched_start_timeout_unlocked(sched);
705 	drm_sched_wqueue_start(sched);
706 }
707 EXPORT_SYMBOL(drm_sched_start);
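
/*
 * A sketch of the typical reset-recovery sequence inside a driver's
 * timedout_job callback (foo_hw_reset() and to_foo_device() are hypothetical
 * driver helpers):
 *
 *	static enum drm_gpu_sched_stat foo_timedout_job(struct drm_sched_job *bad)
 *	{
 *		struct drm_gpu_scheduler *sched = bad->sched;
 *
 *		drm_sched_stop(sched, bad);
 *		drm_sched_increase_karma(bad);
 *		foo_hw_reset(to_foo_device(sched));
 *		drm_sched_start(sched, 0);
 *
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 */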
708 
709 /**
710  * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
711  *
712  * @sched: scheduler instance
713  *
714  * Re-submitting jobs was a concept AMD came up with as a cheap way to implement
715  * recovery after a job timeout.
716  *
717  * This turned out to not work very well. First of all there are many
718  * problems with the dma_fence implementation and requirements. Either the
719  * implementation risks deadlocks with core memory management or violates
720  * documented implementation details of the dma_fence object.
721  *
722  * Drivers can still save and restore their state for recovery operations, but
723  * we shouldn't make this a general scheduler feature around the dma_fence
724  * interface.
725  */
726 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
727 {
728 	struct drm_sched_job *s_job, *tmp;
729 	uint64_t guilty_context;
730 	bool found_guilty = false;
731 	struct dma_fence *fence;
732 
733 	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
734 		struct drm_sched_fence *s_fence = s_job->s_fence;
735 
736 		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
737 			found_guilty = true;
738 			guilty_context = s_job->s_fence->scheduled.context;
739 		}
740 
741 		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
742 			dma_fence_set_error(&s_fence->finished, -ECANCELED);
743 
744 		fence = sched->ops->run_job(s_job);
745 
746 		if (IS_ERR_OR_NULL(fence)) {
747 			if (IS_ERR(fence))
748 				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
749 
750 			s_job->s_fence->parent = NULL;
751 		} else {
752 
753 			s_job->s_fence->parent = dma_fence_get(fence);
754 
755 			/* Drop for orignal kref_init */
756 			dma_fence_put(fence);
757 		}
758 	}
759 }
760 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
761 
762 /**
763  * drm_sched_job_init - init a scheduler job
764  * @job: scheduler job to init
765  * @entity: scheduler entity to use
766  * @credits: the number of credits this job contributes to the schedulers
767  * credit limit
768  * @owner: job owner for debugging
769  *
770  * Refer to drm_sched_entity_push_job() documentation
771  * for locking considerations.
772  *
773  * Drivers must make sure to call drm_sched_job_cleanup() if this function returns
774  * successfully, even when @job is aborted before drm_sched_job_arm() is called.
775  *
776  * Note that this function does not assign a valid value to each struct member
777  * of struct drm_sched_job. Take a look at that struct's documentation to see
778  * who sets which struct member with what lifetime.
779  *
780  * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
781  * has died, which can mean that there's no valid runqueue for an @entity.
782  * This function returns -ENOENT in this case (which probably should be -EIO as
783  * a more meaningful return value).
784  *
785  * Returns 0 for success, negative error code otherwise.
786  */
787 int drm_sched_job_init(struct drm_sched_job *job,
788 		       struct drm_sched_entity *entity,
789 		       u32 credits, void *owner)
790 {
791 	if (!entity->rq) {
792 		/* This will most likely be followed by missing frames
793 		 * or worse--a blank screen--so leave a trail in the
794 		 * logs so that this can be debugged more easily.
795 		 */
796 		dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__);
797 		return -ENOENT;
798 	}
799 
800 	if (unlikely(!credits)) {
801 		pr_err("*ERROR* %s: credits cannot be 0!\n", __func__);
802 		return -EINVAL;
803 	}
804 
805 	/*
806 	 * We don't know for sure how the user has allocated. Thus, zero the
807 	 * struct so that disallowed (i.e., too early) usage of pointers that
808 	 * this function does not set is guaranteed to lead to a NULL pointer
809 	 * exception instead of UB.
810 	 */
811 	memset(job, 0, sizeof(*job));
812 
813 	job->entity = entity;
814 	job->credits = credits;
815 	job->s_fence = drm_sched_fence_alloc(entity, owner);
816 	if (!job->s_fence)
817 		return -ENOMEM;
818 
819 	INIT_LIST_HEAD(&job->list);
820 
821 	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
822 
823 	return 0;
824 }
825 EXPORT_SYMBOL(drm_sched_job_init);
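
/*
 * A sketch of the expected call sequence around job submission. The foo_*
 * helper and the error paths are hypothetical:
 *
 *	ret = drm_sched_job_init(&job->base, &ctx->entity, 1, file_priv);
 *	if (ret)
 *		return ret;
 *
 *	ret = foo_add_dependencies(job);
 *	if (ret) {
 *		drm_sched_job_cleanup(&job->base);	// aborted before arm
 *		return ret;
 *	}
 *
 *	drm_sched_job_arm(&job->base);
 *	fence = dma_fence_get(&job->base.s_fence->finished);
 *	drm_sched_entity_push_job(&job->base);		// point of no return
 */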
826 
827 /**
828  * drm_sched_job_arm - arm a scheduler job for execution
829  * @job: scheduler job to arm
830  *
831  * This arms a scheduler job for execution. Specifically it initializes the
832  * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
833  * or other places that need to track the completion of this job. It also
834  * initializes sequence numbers, which are fundamental for fence ordering.
835  *
836  * Refer to drm_sched_entity_push_job() documentation for locking
837  * considerations.
838  *
839  * Once this function has been called, you *must* submit @job with
840  * drm_sched_entity_push_job().
841  *
842  * This can only be called if drm_sched_job_init() succeeded.
843  */
844 void drm_sched_job_arm(struct drm_sched_job *job)
845 {
846 	struct drm_gpu_scheduler *sched;
847 	struct drm_sched_entity *entity = job->entity;
848 
849 	BUG_ON(!entity);
850 	drm_sched_entity_select_rq(entity);
851 	sched = entity->rq->sched;
852 
853 	job->sched = sched;
854 	job->s_priority = entity->priority;
855 	job->id = atomic64_inc_return(&sched->job_id_count);
856 
857 	drm_sched_fence_init(job->s_fence, job->entity);
858 }
859 EXPORT_SYMBOL(drm_sched_job_arm);
860 
861 /**
862  * drm_sched_job_add_dependency - adds the fence as a job dependency
863  * @job: scheduler job to add the dependencies to
864  * @fence: the dma_fence to add to the list of dependencies.
865  *
866  * Note that @fence is consumed in both the success and error cases.
867  *
868  * Returns:
869  * 0 on success, or an error on failing to expand the array.
870  */
871 int drm_sched_job_add_dependency(struct drm_sched_job *job,
872 				 struct dma_fence *fence)
873 {
874 	struct dma_fence *entry;
875 	unsigned long index;
876 	u32 id = 0;
877 	int ret;
878 
879 	if (!fence)
880 		return 0;
881 
882 	/* Deduplicate if we already depend on a fence from the same context.
883 	 * This lets the size of the array of deps scale with the number of
884 	 * engines involved, rather than the number of BOs.
885 	 */
886 	xa_for_each(&job->dependencies, index, entry) {
887 		if (entry->context != fence->context)
888 			continue;
889 
890 		if (dma_fence_is_later(fence, entry)) {
891 			dma_fence_put(entry);
892 			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
893 		} else {
894 			dma_fence_put(fence);
895 		}
896 		return 0;
897 	}
898 
899 	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
900 	if (ret != 0)
901 		dma_fence_put(fence);
902 
903 	return ret;
904 }
905 EXPORT_SYMBOL(drm_sched_job_add_dependency);
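
/*
 * A sketch of making one job wait for another, already armed job. The extra
 * dma_fence_get() is needed because this function consumes a reference
 * (prev and the error label are hypothetical):
 *
 *	ret = drm_sched_job_add_dependency(&job->base,
 *					   dma_fence_get(&prev->base.s_fence->finished));
 *	if (ret)
 *		goto err_cleanup;
 */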
906 
907 /**
908  * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
909  * @job: scheduler job to add the dependencies to
910  * @file: drm file private pointer
911  * @handle: syncobj handle to lookup
912  * @point: timeline point
913  *
914  * This adds the fence matching the given syncobj to @job.
915  *
916  * Returns:
917  * 0 on success, or an error on failing to expand the array.
918  */
919 int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
920 					 struct drm_file *file,
921 					 u32 handle,
922 					 u32 point)
923 {
924 	struct dma_fence *fence;
925 	int ret;
926 
927 	ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
928 	if (ret)
929 		return ret;
930 
931 	return drm_sched_job_add_dependency(job, fence);
932 }
933 EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);
934 
935 /**
936  * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
937  * @job: scheduler job to add the dependencies to
938  * @resv: the dma_resv object to get the fences from
939  * @usage: the dma_resv_usage to use to filter the fences
940  *
941  * This adds all fences matching the given usage from @resv to @job.
942  * Must be called with the @resv lock held.
943  *
944  * Returns:
945  * 0 on success, or an error on failing to expand the array.
946  */
947 int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
948 					struct dma_resv *resv,
949 					enum dma_resv_usage usage)
950 {
951 	struct dma_resv_iter cursor;
952 	struct dma_fence *fence;
953 	int ret;
954 
955 	dma_resv_assert_held(resv);
956 
957 	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
958 		/* Make sure to grab an additional ref on the added fence */
959 		dma_fence_get(fence);
960 		ret = drm_sched_job_add_dependency(job, fence);
961 		if (ret) {
962 			dma_fence_put(fence);
963 			return ret;
964 		}
965 	}
966 	return 0;
967 }
968 EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);
969 
970 /**
971  * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
972  *   dependencies
973  * @job: scheduler job to add the dependencies to
974  * @obj: the gem object to add new dependencies from.
975  * @write: whether the job might write the object (so we need to depend on
976  * shared fences in the reservation object).
977  *
978  * This should be called after drm_gem_lock_reservations() on your array of
979  * GEM objects used in the job but before updating the reservations with your
980  * own fences.
981  *
982  * Returns:
983  * 0 on success, or an error on failing to expand the array.
984  */
985 int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
986 					    struct drm_gem_object *obj,
987 					    bool write)
988 {
989 	return drm_sched_job_add_resv_dependencies(job, obj->resv,
990 						   dma_resv_usage_rw(write));
991 }
992 EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
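
/*
 * A sketch of adding implicit dependencies for a single reserved GEM object
 * (obj is a struct drm_gem_object; locking shown only for context):
 *
 *	dma_resv_lock(obj->resv, NULL);
 *	ret = drm_sched_job_add_implicit_dependencies(&job->base, obj, true);
 *	dma_resv_unlock(obj->resv);
 */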
993 
994 /**
995  * drm_sched_job_has_dependency - check whether fence is the job's dependency
996  * @job: scheduler job to check
997  * @fence: fence to look for
998  *
999  * Returns:
1000  * True if @fence is found within the job's dependencies, or otherwise false.
1001  */
1002 bool drm_sched_job_has_dependency(struct drm_sched_job *job,
1003 				  struct dma_fence *fence)
1004 {
1005 	struct dma_fence *f;
1006 	unsigned long index;
1007 
1008 	xa_for_each(&job->dependencies, index, f) {
1009 		if (f == fence)
1010 			return true;
1011 	}
1012 
1013 	return false;
1014 }
1015 EXPORT_SYMBOL(drm_sched_job_has_dependency);
1016 
1017 /**
1018  * drm_sched_job_cleanup - clean up scheduler job resources
1019  * @job: scheduler job to clean up
1020  *
1021  * Cleans up the resources allocated with drm_sched_job_init().
1022  *
1023  * Drivers should call this from their error unwind code if @job is aborted
1024  * before drm_sched_job_arm() is called.
1025  *
1026  * drm_sched_job_arm() is a point of no return since it initializes the fences
1027  * and their sequence number etc. Once that function has been called, you *must*
1028  * submit it with drm_sched_entity_push_job() and cannot simply abort it by
1029  * calling drm_sched_job_cleanup().
1030  *
1031  * This function should be called in the &drm_sched_backend_ops.free_job callback.
1032  */
1033 void drm_sched_job_cleanup(struct drm_sched_job *job)
1034 {
1035 	struct dma_fence *fence;
1036 	unsigned long index;
1037 
1038 	if (kref_read(&job->s_fence->finished.refcount)) {
1039 		/* The job has been processed by the scheduler, i.e.,
1040 		 * drm_sched_job_arm() and drm_sched_entity_push_job() have
1041 		 * been called.
1042 		 */
1043 		dma_fence_put(&job->s_fence->finished);
1044 	} else {
1045 		/* The job was aborted before it has been committed to be run;
1046 		 * notably, drm_sched_job_arm() has not been called.
1047 		 */
1048 		drm_sched_fence_free(job->s_fence);
1049 	}
1050 
1051 	job->s_fence = NULL;
1052 
1053 	xa_for_each(&job->dependencies, index, fence) {
1054 		dma_fence_put(fence);
1055 	}
1056 	xa_destroy(&job->dependencies);
1057 
1058 }
1059 EXPORT_SYMBOL(drm_sched_job_cleanup);
1060 
1061 /**
1062  * drm_sched_wakeup - Wake up the scheduler if it is ready to queue
1063  * @sched: scheduler instance
1064  *
1065  * Wake up the scheduler if we can queue jobs.
1066  */
1067 void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
1068 {
1069 	drm_sched_run_job_queue(sched);
1070 }
1071 
1072 /**
1073  * drm_sched_select_entity - Select next entity to process
1074  *
1075  * @sched: scheduler instance
1076  *
1077  * Return an entity to process or NULL if none are found.
1078  *
1079  * Note that we break out of the for-loop when "entity" is non-NULL, which can
1080  * also be an error pointer--this ensures we don't process lower priority
1081  * run-queues. See comments in the respectively called functions.
1082  */
1083 static struct drm_sched_entity *
1084 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
1085 {
1086 	struct drm_sched_entity *entity;
1087 	int i;
1088 
1089 	/* Start with the highest priority.
1090 	 */
1091 	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1092 		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
1093 			drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
1094 			drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
1095 		if (entity)
1096 			break;
1097 	}
1098 
1099 	return IS_ERR(entity) ? NULL : entity;
1100 }
1101 
1102 /**
1103  * drm_sched_get_finished_job - fetch the next finished job to be destroyed
1104  *
1105  * @sched: scheduler instance
1106  *
1107  * Returns the next finished job from the pending list (if there is one)
1108  * that is ready to be destroyed.
1109  */
1110 static struct drm_sched_job *
1111 drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
1112 {
1113 	struct drm_sched_job *job, *next;
1114 
1115 	spin_lock(&sched->job_list_lock);
1116 
1117 	job = list_first_entry_or_null(&sched->pending_list,
1118 				       struct drm_sched_job, list);
1119 
1120 	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
1121 		/* remove job from pending_list */
1122 		list_del_init(&job->list);
1123 
1124 		/* cancel this job's TO timer */
1125 		cancel_delayed_work(&sched->work_tdr);
1126 		/* make the scheduled timestamp more accurate */
1127 		next = list_first_entry_or_null(&sched->pending_list,
1128 						typeof(*next), list);
1129 
1130 		if (next) {
1131 			if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
1132 				     &next->s_fence->scheduled.flags))
1133 				next->s_fence->scheduled.timestamp =
1134 					dma_fence_timestamp(&job->s_fence->finished);
1135 			/* start TO timer for next job */
1136 			drm_sched_start_timeout(sched);
1137 		}
1138 	} else {
1139 		job = NULL;
1140 	}
1141 
1142 	spin_unlock(&sched->job_list_lock);
1143 
1144 	return job;
1145 }
1146 
1147 /**
1148  * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
1149  * @sched_list: list of drm_gpu_schedulers
1150  * @num_sched_list: number of drm_gpu_schedulers in the sched_list
1151  *
1152  * Returns a pointer to the sched with the least load, or NULL if none of the
1153  * drm_gpu_schedulers are ready.
1154  */
1155 struct drm_gpu_scheduler *
1156 drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
1157 		     unsigned int num_sched_list)
1158 {
1159 	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
1160 	int i;
1161 	unsigned int min_score = UINT_MAX, num_score;
1162 
1163 	for (i = 0; i < num_sched_list; ++i) {
1164 		sched = sched_list[i];
1165 
1166 		if (!sched->ready) {
1167 			DRM_WARN("scheduler %s is not ready, skipping",
1168 				 sched->name);
1169 			continue;
1170 		}
1171 
1172 		num_score = atomic_read(sched->score);
1173 		if (num_score < min_score) {
1174 			min_score = num_score;
1175 			picked_sched = sched;
1176 		}
1177 	}
1178 
1179 	return picked_sched;
1180 }
1181 EXPORT_SYMBOL(drm_sched_pick_best);
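
/*
 * A sketch of using this when initializing an entity, assuming a hypothetical
 * device with two schedulers serving the same engine class:
 *
 *	struct drm_gpu_scheduler *list[] = { &fdev->sched[0], &fdev->sched[1] };
 *	struct drm_gpu_scheduler *best;
 *
 *	best = drm_sched_pick_best(list, ARRAY_SIZE(list));
 *	if (best)
 *		ret = drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *					    &best, 1, NULL);
 */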
1182 
1183 /**
1184  * drm_sched_free_job_work - worker to call free_job
1185  *
1186  * @w: free job work
1187  */
1188 static void drm_sched_free_job_work(struct work_struct *w)
1189 {
1190 	struct drm_gpu_scheduler *sched =
1191 		container_of(w, struct drm_gpu_scheduler, work_free_job);
1192 	struct drm_sched_job *job;
1193 
1194 	job = drm_sched_get_finished_job(sched);
1195 	if (job)
1196 		sched->ops->free_job(job);
1197 
1198 	drm_sched_run_free_queue(sched);
1199 	drm_sched_run_job_queue(sched);
1200 }
1201 
1202 /**
1203  * drm_sched_run_job_work - worker to call run_job
1204  *
1205  * @w: run job work
1206  */
1207 static void drm_sched_run_job_work(struct work_struct *w)
1208 {
1209 	struct drm_gpu_scheduler *sched =
1210 		container_of(w, struct drm_gpu_scheduler, work_run_job);
1211 	struct drm_sched_entity *entity;
1212 	struct dma_fence *fence;
1213 	struct drm_sched_fence *s_fence;
1214 	struct drm_sched_job *sched_job;
1215 	int r;
1216 
1217 	/* Find entity with a ready job */
1218 	entity = drm_sched_select_entity(sched);
1219 	if (!entity)
1220 		return;	/* No more work */
1221 
1222 	sched_job = drm_sched_entity_pop_job(entity);
1223 	if (!sched_job) {
1224 		complete_all(&entity->entity_idle);
1225 		drm_sched_run_job_queue(sched);
1226 		return;
1227 	}
1228 
1229 	s_fence = sched_job->s_fence;
1230 
1231 	atomic_add(sched_job->credits, &sched->credit_count);
1232 	drm_sched_job_begin(sched_job);
1233 
1234 	trace_drm_run_job(sched_job, entity);
1235 	/*
1236 	 * The run_job() callback must by definition return a fence whose
1237 	 * refcount has been incremented for the scheduler already.
1238 	 */
1239 	fence = sched->ops->run_job(sched_job);
1240 	complete_all(&entity->entity_idle);
1241 	drm_sched_fence_scheduled(s_fence, fence);
1242 
1243 	if (!IS_ERR_OR_NULL(fence)) {
1244 		r = dma_fence_add_callback(fence, &sched_job->cb,
1245 					   drm_sched_job_done_cb);
1246 		if (r == -ENOENT)
1247 			drm_sched_job_done(sched_job, fence->error);
1248 		else if (r)
1249 			DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
1250 
1251 		dma_fence_put(fence);
1252 	} else {
1253 		drm_sched_job_done(sched_job, IS_ERR(fence) ?
1254 				   PTR_ERR(fence) : 0);
1255 	}
1256 
1257 	wake_up(&sched->job_scheduled);
1258 	drm_sched_run_job_queue(sched);
1259 }
1260 
1261 /**
1262  * drm_sched_init - Init a gpu scheduler instance
1263  *
1264  * @sched: scheduler instance
1265  * @args: scheduler initialization arguments
1266  *
1267  * Return 0 on success, otherwise error code.
1268  */
1269 int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_args *args)
1270 {
1271 	int i;
1272 
1273 	sched->ops = args->ops;
1274 	sched->credit_limit = args->credit_limit;
1275 	sched->name = args->name;
1276 	sched->timeout = args->timeout;
1277 	sched->hang_limit = args->hang_limit;
1278 	sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_wq;
1279 	sched->score = args->score ? args->score : &sched->_score;
1280 	sched->dev = args->dev;
1281 
1282 	if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) {
1283 		/* This is a gross violation--tell drivers what the problem is.
1284 		 */
1285 		dev_err(sched->dev, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
1286 			__func__);
1287 		return -EINVAL;
1288 	} else if (sched->sched_rq) {
1289 		/* Not an error, but warn anyway so drivers can
1290 		 * fine-tune their DRM calling order, and return 0 since
1291 		 * all is good.
1292 		 */
1293 		dev_warn(sched->dev, "%s: scheduler already initialized!\n", __func__);
1294 		return 0;
1295 	}
1296 
1297 	if (args->submit_wq) {
1298 		sched->submit_wq = args->submit_wq;
1299 		sched->own_submit_wq = false;
1300 	} else {
1301 #ifdef CONFIG_LOCKDEP
1302 		sched->submit_wq = alloc_ordered_workqueue_lockdep_map(args->name,
1303 								       WQ_MEM_RECLAIM,
1304 								       &drm_sched_lockdep_map);
1305 #else
1306 		sched->submit_wq = alloc_ordered_workqueue(args->name, WQ_MEM_RECLAIM);
1307 #endif
1308 		if (!sched->submit_wq)
1309 			return -ENOMEM;
1310 
1311 		sched->own_submit_wq = true;
1312 	}
1313 
1314 	sched->sched_rq = kmalloc_array(args->num_rqs, sizeof(*sched->sched_rq),
1315 					GFP_KERNEL | __GFP_ZERO);
1316 	if (!sched->sched_rq)
1317 		goto Out_check_own;
1318 	sched->num_rqs = args->num_rqs;
1319 	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1320 		sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
1321 		if (!sched->sched_rq[i])
1322 			goto Out_unroll;
1323 		drm_sched_rq_init(sched, sched->sched_rq[i]);
1324 	}
1325 
1326 	init_waitqueue_head(&sched->job_scheduled);
1327 	INIT_LIST_HEAD(&sched->pending_list);
1328 	spin_lock_init(&sched->job_list_lock);
1329 	atomic_set(&sched->credit_count, 0);
1330 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
1331 	INIT_WORK(&sched->work_run_job, drm_sched_run_job_work);
1332 	INIT_WORK(&sched->work_free_job, drm_sched_free_job_work);
1333 	atomic_set(&sched->_score, 0);
1334 	atomic64_set(&sched->job_id_count, 0);
1335 	sched->pause_submit = false;
1336 
1337 	sched->ready = true;
1338 	return 0;
1339 Out_unroll:
1340 	for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
1341 		kfree(sched->sched_rq[i]);
1342 
1343 	kfree(sched->sched_rq);
1344 	sched->sched_rq = NULL;
1345 Out_check_own:
1346 	if (sched->own_submit_wq)
1347 		destroy_workqueue(sched->submit_wq);
1348 	dev_err(sched->dev, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
1349 	return -ENOMEM;
1350 }
1351 EXPORT_SYMBOL(drm_sched_init);
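
/*
 * A sketch of a driver setting up a scheduler instance. The values below are
 * hypothetical; timeout_wq, submit_wq and score are left at their defaults:
 *
 *	const struct drm_sched_init_args args = {
 *		.ops = &foo_sched_ops,
 *		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
 *		.credit_limit = 64,
 *		.hang_limit = 1,
 *		.timeout = msecs_to_jiffies(500),
 *		.name = "foo-ring0",
 *		.dev = fdev->dev,
 *	};
 *
 *	ret = drm_sched_init(&fdev->sched, &args);
 */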
1352 
1353 /**
1354  * drm_sched_fini - Destroy a gpu scheduler
1355  *
1356  * @sched: scheduler instance
1357  *
1358  * Tears down and cleans up the scheduler.
1359  *
1360  * This stops submission of new jobs to the hardware through
1361  * drm_sched_backend_ops.run_job(). Consequently, drm_sched_backend_ops.free_job()
1362  * will not be called for all jobs still in drm_gpu_scheduler.pending_list.
1363  * There is no solution for this currently. Thus, it is up to the driver to make
1364  * sure that:
1365  *
1366  *  a) drm_sched_fini() is only called after drm_sched_backend_ops.free_job()
1367  *     has been called for all submitted jobs, or that
1368  *  b) the jobs for which drm_sched_backend_ops.free_job() has not been called
1369  *     after drm_sched_fini() ran are freed manually.
1370  *
1371  * FIXME: Take care of the above problem and prevent this function from leaking
1372  * the jobs in drm_gpu_scheduler.pending_list under any circumstances.
1373  */
1374 void drm_sched_fini(struct drm_gpu_scheduler *sched)
1375 {
1376 	struct drm_sched_entity *s_entity;
1377 	int i;
1378 
1379 	drm_sched_wqueue_stop(sched);
1380 
1381 	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1382 		struct drm_sched_rq *rq = sched->sched_rq[i];
1383 
1384 		spin_lock(&rq->lock);
1385 		list_for_each_entry(s_entity, &rq->entities, list)
1386 			/*
1387 			 * Prevents reinsertion and marks job_queue as idle,
1388 			 * it will be removed from the rq in drm_sched_entity_fini()
1389 			 * eventually
1390 			 */
1391 			s_entity->stopped = true;
1392 		spin_unlock(&rq->lock);
1393 		kfree(sched->sched_rq[i]);
1394 	}
1395 
1396 	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
1397 	wake_up_all(&sched->job_scheduled);
1398 
1399 	/* Confirm no work left behind accessing device structures */
1400 	cancel_delayed_work_sync(&sched->work_tdr);
1401 
1402 	if (sched->own_submit_wq)
1403 		destroy_workqueue(sched->submit_wq);
1404 	sched->ready = false;
1405 	kfree(sched->sched_rq);
1406 	sched->sched_rq = NULL;
1407 }
1408 EXPORT_SYMBOL(drm_sched_fini);
1409 
1410 /**
1411  * drm_sched_increase_karma - Update sched_entity guilty flag
1412  *
1413  * @bad: The job guilty of time out
1414  *
1415  * Increment on every hang caused by the 'bad' job. If this exceeds the hang
1416  * limit of the scheduler then the respective sched entity is marked guilty and
1417  * jobs from it will not be scheduled further.
1418  */
1419 void drm_sched_increase_karma(struct drm_sched_job *bad)
1420 {
1421 	int i;
1422 	struct drm_sched_entity *tmp;
1423 	struct drm_sched_entity *entity;
1424 	struct drm_gpu_scheduler *sched = bad->sched;
1425 
1426 	/* don't change @bad's karma if it's from the KERNEL RQ,
1427 	 * because sometimes a GPU hang would cause kernel jobs (like VM updating jobs)
1428 	 * to be corrupted, but keep in mind that kernel jobs are always considered good.
1429 	 */
1430 	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
1431 		atomic_inc(&bad->karma);
1432 
1433 		for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
1434 			struct drm_sched_rq *rq = sched->sched_rq[i];
1435 
1436 			spin_lock(&rq->lock);
1437 			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
1438 				if (bad->s_fence->scheduled.context ==
1439 				    entity->fence_context) {
1440 					if (entity->guilty)
1441 						atomic_set(entity->guilty, 1);
1442 					break;
1443 				}
1444 			}
1445 			spin_unlock(&rq->lock);
1446 			if (&entity->list != &rq->entities)
1447 				break;
1448 		}
1449 	}
1450 }
1451 EXPORT_SYMBOL(drm_sched_increase_karma);
1452 
1453 /**
1454  * drm_sched_wqueue_ready - Is the scheduler ready for submission
1455  *
1456  * @sched: scheduler instance
1457  *
1458  * Returns true if submission is ready
1459  */
1460 bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched)
1461 {
1462 	return sched->ready;
1463 }
1464 EXPORT_SYMBOL(drm_sched_wqueue_ready);
1465 
1466 /**
1467  * drm_sched_wqueue_stop - stop scheduler submission
1468  * @sched: scheduler instance
1469  *
1470  * Stops the scheduler from pulling new jobs from entities. It also stops
1471  * freeing jobs automatically through drm_sched_backend_ops.free_job().
1472  */
1473 void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
1474 {
1475 	WRITE_ONCE(sched->pause_submit, true);
1476 	cancel_work_sync(&sched->work_run_job);
1477 	cancel_work_sync(&sched->work_free_job);
1478 }
1479 EXPORT_SYMBOL(drm_sched_wqueue_stop);
1480 
1481 /**
1482  * drm_sched_wqueue_start - start scheduler submission
1483  * @sched: scheduler instance
1484  *
1485  * Restarts the scheduler after drm_sched_wqueue_stop() has stopped it.
1486  *
1487  * This function is not necessary for 'conventional' startup. The scheduler is
1488  * fully operational after drm_sched_init() succeeded.
1489  */
1490 void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
1491 {
1492 	WRITE_ONCE(sched->pause_submit, false);
1493 	queue_work(sched->submit_wq, &sched->work_run_job);
1494 	queue_work(sched->submit_wq, &sched->work_free_job);
1495 }
1496 EXPORT_SYMBOL(drm_sched_wqueue_start);
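
/*
 * A sketch of pairing drm_sched_wqueue_stop()/drm_sched_wqueue_start(), e.g.
 * around a hypothetical device suspend/resume sequence:
 *
 *	drm_sched_wqueue_stop(&fdev->sched);
 *	foo_device_suspend(fdev);
 *	...
 *	foo_device_resume(fdev);
 *	drm_sched_wqueue_start(&fdev->sched);
 */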
1497