xref: /linux/drivers/gpu/drm/scheduler/sched_main.c (revision d8684ae1cdcf848d21e00bc0e0de821d694a207b)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 /**
25  * DOC: Overview
26  *
27  * The GPU scheduler provides entities which allow userspace to push jobs
28  * into software queues which are then scheduled on a hardware run queue.
29  * The software queues have a priority among them. The scheduler selects entities
30  * from the run queue in FIFO order. The scheduler provides dependency handling
31  * features among jobs. The driver is supposed to provide callback functions for
32  * backend operations to the scheduler, such as submitting a job to the hardware
33  * run queue, returning the dependencies of a job, etc.
34  *
35  * The organisation of the scheduler is the following:
36  *
37  * 1. Each hw run queue has one scheduler
38  * 2. Each scheduler has multiple run queues with different priorities
39  *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
40  * 3. Each scheduler run queue has a queue of entities to schedule
41  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
42  *    the hardware.
43  *
44  * The jobs in an entity are always scheduled in the order in which they were pushed.
45  *
46  * Note that once a job has been taken from the entity's queue and pushed to the
47  * hardware, i.e. the pending queue, the entity must not be referenced anymore
48  * through the job's entity pointer.
49  */
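/*
 * Purely as an illustration of the organisation above (no particular driver;
 * all "foo_*" names are hypothetical), a backend typically wires its hardware
 * run queue to the scheduler through &struct drm_sched_backend_ops:
 *
 *	static const struct drm_sched_backend_ops foo_sched_ops = {
 *		.run_job	= foo_run_job,
 *		.timedout_job	= foo_timedout_job,
 *		.free_job	= foo_free_job,
 *	};
 *
 * Each userspace context then gets a &struct drm_sched_entity (initialized
 * with drm_sched_entity_init()) on one of the scheduler's run queues, and
 * jobs pushed through that entity are executed in submission order.
 */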
50 
51 /**
52  * DOC: Flow Control
53  *
54  * The DRM GPU scheduler provides a flow control mechanism to regulate the rate
55  * at which jobs fetched from scheduler entities are executed.
56  *
57  * In this context the &drm_gpu_scheduler keeps track of a driver specified
58  * credit limit representing the capacity of this scheduler and a credit count;
59  * every &drm_sched_job carries a driver specified number of credits.
60  *
61  * Once a job is executed (but not yet finished), the job's credits contribute
62  * to the scheduler's credit count until the job is finished. If by executing
63  * one more job the scheduler's credit count would exceed the scheduler's
64  * credit limit, the job won't be executed. Instead, the scheduler will wait
65  * until the credit count has decreased enough to no longer exceed its credit limit.
66  * This implies waiting for previously executed jobs.
67  */
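/*
 * A minimal sketch of how flow control is typically configured (illustrative
 * only; the "foo_*" names and values are made up): the driver declares the
 * capacity once at init time and the cost of each job at submission time.
 *
 *	struct drm_sched_init_args args = {
 *		.ops		= &foo_sched_ops,
 *		.credit_limit	= FOO_RING_CAPACITY,
 *		...
 *	};
 *
 *	drm_sched_init(&foo->sched, &args);
 *	...
 *	drm_sched_job_init(&foo_job->base, entity, foo_job_credits(foo_job),
 *			   foo, drm_client_id);
 *
 * A job is only popped from its entity once its credits fit into the credit
 * limit minus the credits of all jobs currently executing.
 */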
68 
69 #include <linux/export.h>
70 #include <linux/wait.h>
71 #include <linux/sched.h>
72 #include <linux/completion.h>
73 #include <linux/dma-resv.h>
74 #include <uapi/linux/sched/types.h>
75 
76 #include <drm/drm_print.h>
77 #include <drm/drm_gem.h>
78 #include <drm/drm_syncobj.h>
79 #include <drm/gpu_scheduler.h>
80 #include <drm/spsc_queue.h>
81 
82 #include "sched_internal.h"
83 
84 #define CREATE_TRACE_POINTS
85 #include "gpu_scheduler_trace.h"
86 
87 int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
88 
89 /**
90  * DOC: sched_policy (int)
91  * Used to override the default scheduling policy for entities in a run queue.
92  */
93 MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
94 module_param_named(sched_policy, drm_sched_policy, int, 0444);
95 
96 static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
97 {
98 	u32 credits;
99 
100 	WARN_ON(check_sub_overflow(sched->credit_limit,
101 				   atomic_read(&sched->credit_count),
102 				   &credits));
103 
104 	return credits;
105 }
106 
107 /**
108  * drm_sched_can_queue - Can we queue more to the hardware?
109  * @sched: scheduler instance
110  * @entity: the scheduler entity
111  *
112  * Return true if we can push at least one more job from @entity, false
113  * otherwise.
114  */
115 static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
116 				struct drm_sched_entity *entity)
117 {
118 	struct drm_sched_job *s_job;
119 
120 	s_job = drm_sched_entity_queue_peek(entity);
121 	if (!s_job)
122 		return false;
123 
124 	/* If a job exceeds the credit limit, truncate it to the credit limit
125 	 * itself to guarantee forward progress.
126 	 */
127 	if (s_job->credits > sched->credit_limit) {
128 		dev_WARN(sched->dev,
129 			 "Jobs may not exceed the credit limit, truncate.\n");
130 		s_job->credits = sched->credit_limit;
131 	}
132 
133 	return drm_sched_available_credits(sched) >= s_job->credits;
134 }
135 
136 static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
137 							    const struct rb_node *b)
138 {
139 	struct drm_sched_entity *ent_a =  rb_entry((a), struct drm_sched_entity, rb_tree_node);
140 	struct drm_sched_entity *ent_b =  rb_entry((b), struct drm_sched_entity, rb_tree_node);
141 
142 	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
143 }
144 
145 static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
146 					    struct drm_sched_rq *rq)
147 {
148 	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
149 		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
150 		RB_CLEAR_NODE(&entity->rb_tree_node);
151 	}
152 }
153 
154 void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
155 				     struct drm_sched_rq *rq,
156 				     ktime_t ts)
157 {
158 	/*
159 	 * Both locks need to be held: one protects against a concurrent
160 	 * drm_sched_entity_select_rq() changing entity->rq, the other protects
161 	 * the rb tree structure while it is updated.
162 	 */
163 	lockdep_assert_held(&entity->lock);
164 	lockdep_assert_held(&rq->lock);
165 
166 	drm_sched_rq_remove_fifo_locked(entity, rq);
167 
168 	entity->oldest_job_waiting = ts;
169 
170 	rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root,
171 		      drm_sched_entity_compare_before);
172 }
173 
174 /**
175  * drm_sched_rq_init - initialize a given run queue struct
176  *
177  * @sched: scheduler instance to associate with this run queue
178  * @rq: scheduler run queue
179  *
180  * Initializes a scheduler runqueue.
181  */
182 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
183 			      struct drm_sched_rq *rq)
184 {
185 	spin_lock_init(&rq->lock);
186 	INIT_LIST_HEAD(&rq->entities);
187 	rq->rb_tree_root = RB_ROOT_CACHED;
188 	rq->current_entity = NULL;
189 	rq->sched = sched;
190 }
191 
192 /**
193  * drm_sched_rq_add_entity - add an entity
194  *
195  * @rq: scheduler run queue
196  * @entity: scheduler entity
197  *
198  * Adds a scheduler entity to the run queue.
199  */
200 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
201 			     struct drm_sched_entity *entity)
202 {
203 	lockdep_assert_held(&entity->lock);
204 	lockdep_assert_held(&rq->lock);
205 
206 	if (!list_empty(&entity->list))
207 		return;
208 
209 	atomic_inc(rq->sched->score);
210 	list_add_tail(&entity->list, &rq->entities);
211 }
212 
213 /**
214  * drm_sched_rq_remove_entity - remove an entity
215  *
216  * @rq: scheduler run queue
217  * @entity: scheduler entity
218  *
219  * Removes a scheduler entity from the run queue.
220  */
221 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
222 				struct drm_sched_entity *entity)
223 {
224 	lockdep_assert_held(&entity->lock);
225 
226 	if (list_empty(&entity->list))
227 		return;
228 
229 	spin_lock(&rq->lock);
230 
231 	atomic_dec(rq->sched->score);
232 	list_del_init(&entity->list);
233 
234 	if (rq->current_entity == entity)
235 		rq->current_entity = NULL;
236 
237 	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
238 		drm_sched_rq_remove_fifo_locked(entity, rq);
239 
240 	spin_unlock(&rq->lock);
241 }
242 
243 /**
244  * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
245  *
246  * @sched: the gpu scheduler
247  * @rq: scheduler run queue to check.
248  *
249  * Try to find the next ready entity.
250  *
251  * Return an entity if one is found; return an error-pointer (!NULL) if an
252  * entity was ready, but the scheduler had insufficient credits to accommodate
253  * its job; return NULL if no ready entity was found.
254  */
255 static struct drm_sched_entity *
256 drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
257 			      struct drm_sched_rq *rq)
258 {
259 	struct drm_sched_entity *entity;
260 
261 	spin_lock(&rq->lock);
262 
263 	entity = rq->current_entity;
264 	if (entity) {
265 		list_for_each_entry_continue(entity, &rq->entities, list) {
266 			if (drm_sched_entity_is_ready(entity))
267 				goto found;
268 		}
269 	}
270 
271 	list_for_each_entry(entity, &rq->entities, list) {
272 		if (drm_sched_entity_is_ready(entity))
273 			goto found;
274 
275 		if (entity == rq->current_entity)
276 			break;
277 	}
278 
279 	spin_unlock(&rq->lock);
280 
281 	return NULL;
282 
283 found:
284 	if (!drm_sched_can_queue(sched, entity)) {
285 		/*
286 		 * If scheduler cannot take more jobs signal the caller to not
287 		 * consider lower priority queues.
288 		 */
289 		entity = ERR_PTR(-ENOSPC);
290 	} else {
291 		rq->current_entity = entity;
292 		reinit_completion(&entity->entity_idle);
293 	}
294 
295 	spin_unlock(&rq->lock);
296 
297 	return entity;
298 }
299 
300 /**
301  * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
302  *
303  * @sched: the gpu scheduler
304  * @rq: scheduler run queue to check.
305  *
306  * Find oldest waiting ready entity.
307  *
308  * Return an entity if one is found; return an error-pointer (!NULL) if an
309  * entity was ready, but the scheduler had insufficient credits to accommodate
310  * its job; return NULL if no ready entity was found.
311  */
312 static struct drm_sched_entity *
313 drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched,
314 				struct drm_sched_rq *rq)
315 {
316 	struct rb_node *rb;
317 
318 	spin_lock(&rq->lock);
319 	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
320 		struct drm_sched_entity *entity;
321 
322 		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
323 		if (drm_sched_entity_is_ready(entity)) {
324 			/* If we can't queue yet, preserve the current entity in
325 			 * terms of fairness.
326 			 */
327 			if (!drm_sched_can_queue(sched, entity)) {
328 				spin_unlock(&rq->lock);
329 				return ERR_PTR(-ENOSPC);
330 			}
331 
332 			reinit_completion(&entity->entity_idle);
333 			break;
334 		}
335 	}
336 	spin_unlock(&rq->lock);
337 
338 	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
339 }
340 
341 /**
342  * drm_sched_run_job_queue - enqueue run-job work
343  * @sched: scheduler instance
344  */
345 static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
346 {
347 	if (!drm_sched_is_stopped(sched))
348 		queue_work(sched->submit_wq, &sched->work_run_job);
349 }
350 
351 /**
352  * drm_sched_run_free_queue - enqueue free-job work
353  * @sched: scheduler instance
354  */
355 static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
356 {
357 	if (!drm_sched_is_stopped(sched))
358 		queue_work(sched->submit_wq, &sched->work_free_job);
359 }
360 
361 /**
362  * drm_sched_job_done - complete a job
363  * @s_job: pointer to the job which is done
364  *
365  * Finish the job's fence and resubmit the work items.
366  */
367 static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
368 {
369 	struct drm_sched_fence *s_fence = s_job->s_fence;
370 	struct drm_gpu_scheduler *sched = s_fence->sched;
371 
372 	atomic_sub(s_job->credits, &sched->credit_count);
373 	atomic_dec(sched->score);
374 
375 	trace_drm_sched_job_done(s_fence);
376 
377 	dma_fence_get(&s_fence->finished);
378 	drm_sched_fence_finished(s_fence, result);
379 	dma_fence_put(&s_fence->finished);
380 	drm_sched_run_free_queue(sched);
381 }
382 
383 /**
384  * drm_sched_job_done_cb - the callback for a done job
385  * @f: fence
386  * @cb: fence callbacks
387  */
388 static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
389 {
390 	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
391 
392 	drm_sched_job_done(s_job, f->error);
393 }
394 
395 /**
396  * drm_sched_start_timeout - start timeout for reset worker
397  *
398  * @sched: scheduler instance to start the worker for
399  *
400  * Start the timeout for the given scheduler.
401  */
402 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
403 {
404 	lockdep_assert_held(&sched->job_list_lock);
405 
406 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
407 	    !list_empty(&sched->pending_list))
408 		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
409 }
410 
411 static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched)
412 {
413 	spin_lock(&sched->job_list_lock);
414 	drm_sched_start_timeout(sched);
415 	spin_unlock(&sched->job_list_lock);
416 }
417 
418 /**
419  * drm_sched_tdr_queue_imm - immediately start job timeout handler
420  *
421  * @sched: scheduler for which the timeout handling should be started.
422  *
423  * Start timeout handling immediately for the named scheduler.
424  */
425 void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched)
426 {
427 	spin_lock(&sched->job_list_lock);
428 	sched->timeout = 0;
429 	drm_sched_start_timeout(sched);
430 	spin_unlock(&sched->job_list_lock);
431 }
432 EXPORT_SYMBOL(drm_sched_tdr_queue_imm);
433 
434 /**
435  * drm_sched_fault - immediately start timeout handler
436  *
437  * @sched: scheduler where the timeout handling should be started.
438  *
439  * Start timeout handling immediately when the driver detects a hardware fault.
440  */
441 void drm_sched_fault(struct drm_gpu_scheduler *sched)
442 {
443 	if (sched->timeout_wq)
444 		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
445 }
446 EXPORT_SYMBOL(drm_sched_fault);
447 
448 /**
449  * drm_sched_suspend_timeout - Suspend scheduler job timeout
450  *
451  * @sched: scheduler instance for which to suspend the timeout
452  *
453  * Suspend the delayed work timeout for the scheduler. This is done by
454  * modifying the delayed work timeout to an arbitrarily large value,
455  * MAX_SCHEDULE_TIMEOUT in this case.
456  *
457  * Returns the remaining timeout.
458  *
459  */
460 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
461 {
462 	unsigned long sched_timeout, now = jiffies;
463 
464 	sched_timeout = sched->work_tdr.timer.expires;
465 
466 	/*
467 	 * Modify the timeout to an arbitrarily large value. This also prevents
468 	 * the timeout from being restarted when new submissions arrive.
469 	 */
470 	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
471 			&& time_after(sched_timeout, now))
472 		return sched_timeout - now;
473 	else
474 		return sched->timeout;
475 }
476 EXPORT_SYMBOL(drm_sched_suspend_timeout);
477 
478 /**
479  * drm_sched_resume_timeout - Resume scheduler job timeout
480  *
481  * @sched: scheduler instance for which to resume the timeout
482  * @remaining: remaining timeout
483  *
484  * Resume the delayed work timeout for the scheduler.
485  */
486 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
487 		unsigned long remaining)
488 {
489 	spin_lock(&sched->job_list_lock);
490 
491 	if (list_empty(&sched->pending_list))
492 		cancel_delayed_work(&sched->work_tdr);
493 	else
494 		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);
495 
496 	spin_unlock(&sched->job_list_lock);
497 }
498 EXPORT_SYMBOL(drm_sched_resume_timeout);
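/*
 * A hypothetical usage sketch (not taken from a real driver): a driver that
 * temporarily takes the ring away from the scheduler, e.g. for preemption,
 * can pause the timeout so the idle period is not treated as a hang.
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(&foo->sched);
 *	foo_run_something_else_on_the_ring(foo);
 *	drm_sched_resume_timeout(&foo->sched, remaining);
 */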
499 
500 static void drm_sched_job_begin(struct drm_sched_job *s_job)
501 {
502 	struct drm_gpu_scheduler *sched = s_job->sched;
503 
504 	spin_lock(&sched->job_list_lock);
505 	list_add_tail(&s_job->list, &sched->pending_list);
506 	drm_sched_start_timeout(sched);
507 	spin_unlock(&sched->job_list_lock);
508 }
509 
510 /**
511  * drm_sched_job_reinsert_on_false_timeout - reinsert the job on a false timeout
512  * @sched: scheduler instance
513  * @job: job to be reinserted on the pending list
514  *
515  * In the case of a "false timeout" - when a timeout occurs but the GPU isn't
516  * hung and is making progress - the scheduler must reinsert the job into
517  * @sched->pending_list. Otherwise, the job and its resources won't be freed
518  * through the &struct drm_sched_backend_ops.free_job callback.
519  *
520  * This function must be used in "false timeout" cases only.
521  */
522 static void drm_sched_job_reinsert_on_false_timeout(struct drm_gpu_scheduler *sched,
523 						    struct drm_sched_job *job)
524 {
525 	spin_lock(&sched->job_list_lock);
526 	list_add(&job->list, &sched->pending_list);
527 
528 	/* After reinserting the job, the scheduler enqueues the free-job work
529 	 * again if ready. Otherwise, a signaled job could be added to the
530 	 * pending list, but never freed.
531 	 */
532 	drm_sched_run_free_queue(sched);
533 	spin_unlock(&sched->job_list_lock);
534 }
535 
536 static void drm_sched_job_timedout(struct work_struct *work)
537 {
538 	struct drm_gpu_scheduler *sched;
539 	struct drm_sched_job *job;
540 	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_RESET;
541 
542 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
543 
544 	/* Protects against concurrent deletion in drm_sched_get_finished_job */
545 	spin_lock(&sched->job_list_lock);
546 	job = list_first_entry_or_null(&sched->pending_list,
547 				       struct drm_sched_job, list);
548 
549 	if (job) {
550 		/*
551 		 * Remove the bad job so it cannot be freed by a concurrent
552 		 * &struct drm_sched_backend_ops.free_job. It will be
553 		 * reinserted after the scheduler's work items have been
554 		 * cancelled, at which point it's safe.
555 		 */
556 		list_del_init(&job->list);
557 		spin_unlock(&sched->job_list_lock);
558 
559 		status = job->sched->ops->timedout_job(job);
560 
561 		/*
562 		 * The guilty job did complete and hence needs to be removed
563 		 * manually; see the drm_sched_stop() documentation.
564 		 */
565 		if (sched->free_guilty) {
566 			job->sched->ops->free_job(job);
567 			sched->free_guilty = false;
568 		}
569 
570 		if (status == DRM_GPU_SCHED_STAT_NO_HANG)
571 			drm_sched_job_reinsert_on_false_timeout(sched, job);
572 	} else {
573 		spin_unlock(&sched->job_list_lock);
574 	}
575 
576 	if (status != DRM_GPU_SCHED_STAT_ENODEV)
577 		drm_sched_start_timeout_unlocked(sched);
578 }
579 
580 /**
581  * drm_sched_stop - stop the scheduler
582  *
583  * @sched: scheduler instance
584  * @bad: job which caused the time out
585  *
586  * Stop the scheduler, and also remove and free all completed jobs.
587  * Note: the bad job will not be freed, as it might be used later, and so it
588  * is the caller's responsibility to release it manually if it is not part of
589  * the pending list any more.
590  *
591  * This function is typically used for reset recovery (see the documentation of
592  * drm_sched_backend_ops.timedout_job() for details). Do not call it for
593  * scheduler teardown, i.e., before calling drm_sched_fini().
594  *
595  * As it's only used for reset recovery, drivers must not call this function
596  * in their &struct drm_sched_backend_ops.timedout_job callback when they
597  * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG.
598  */
599 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
600 {
601 	struct drm_sched_job *s_job, *tmp;
602 
603 	drm_sched_wqueue_stop(sched);
604 
605 	/*
606 	 * Reinsert the bad job here - now it's safe, as
607 	 * drm_sched_get_finished_job() cannot race against us and release the
608 	 * bad job at this point - we parked (waited for) any in-progress
609 	 * (earlier) cleanups, and drm_sched_get_finished_job() will not be
610 	 * called again until the scheduler's work items are submitted again.
611 	 */
612 	if (bad && bad->sched == sched)
613 		/*
614 		 * Add at the head of the queue to reflect it was the earliest
615 		 * job extracted.
616 		 */
617 		list_add(&bad->list, &sched->pending_list);
618 
619 	/*
620 	 * Iterate the job list from later to earlier ones and either deactivate
621 	 * their HW callbacks or remove them from the pending list if they have
622 	 * already signaled.
623 	 * This iteration is thread safe as the scheduler's work items have been
624 	 * cancelled.
625 	 */
626 	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
627 					 list) {
628 		if (s_job->s_fence->parent &&
629 		    dma_fence_remove_callback(s_job->s_fence->parent,
630 					      &s_job->cb)) {
631 			dma_fence_put(s_job->s_fence->parent);
632 			s_job->s_fence->parent = NULL;
633 			atomic_sub(s_job->credits, &sched->credit_count);
634 		} else {
635 			/*
636 			 * Remove the job from the pending_list.
637 			 * Locking here protects against a concurrent timeout resume.
638 			 */
639 			spin_lock(&sched->job_list_lock);
640 			list_del_init(&s_job->list);
641 			spin_unlock(&sched->job_list_lock);
642 
643 			/*
644 			 * Wait for the job's HW fence callback to finish using s_job
645 			 * before releasing it.
646 			 *
647 			 * The job is still alive, so the fence refcount is at least 1.
648 			 */
649 			dma_fence_wait(&s_job->s_fence->finished, false);
650 
651 			/*
652 			 * We must keep the bad job alive for later use during
653 			 * recovery by some of the drivers, but leave a hint
654 			 * that the guilty job must be released.
655 			 */
656 			if (bad != s_job)
657 				sched->ops->free_job(s_job);
658 			else
659 				sched->free_guilty = true;
660 		}
661 	}
662 
663 	/*
664 	 * Stop the pending timer in flight, as we rearm it in drm_sched_start().
665 	 * This avoids the pending timeout work in progress firing right away
666 	 * after this TDR finishes and before the newly restarted jobs have had a
667 	 * chance to complete.
668 	 */
669 	cancel_delayed_work(&sched->work_tdr);
670 }
671 EXPORT_SYMBOL(drm_sched_stop);
672 
673 /**
674  * drm_sched_start - recover jobs after a reset
675  *
676  * @sched: scheduler instance
677  * @errno: error to set on the pending fences
678  *
679  * This function is typically used for reset recovery (see the documentation of
680  * drm_sched_backend_ops.timedout_job() for details). Do not call it for
681  * scheduler startup. The scheduler itself is fully operational after
682  * drm_sched_init() succeeded.
683  *
684  * As it's only used for reset recovery, drivers must not call this function
685  * in their &struct drm_sched_backend_ops.timedout_job callback when they
686  * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG.
687  */
688 void drm_sched_start(struct drm_gpu_scheduler *sched, int errno)
689 {
690 	struct drm_sched_job *s_job, *tmp;
691 
692 	/*
693 	 * Locking the list is not required here as the scheduler's work items
694 	 * are currently not running, so no new jobs are being inserted or
695 	 * removed. Also, concurrent GPU recoveries can't run in parallel.
696 	 */
697 	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
698 		struct dma_fence *fence = s_job->s_fence->parent;
699 
700 		atomic_add(s_job->credits, &sched->credit_count);
701 
702 		if (!fence) {
703 			drm_sched_job_done(s_job, errno ?: -ECANCELED);
704 			continue;
705 		}
706 
707 		if (dma_fence_add_callback(fence, &s_job->cb,
708 					   drm_sched_job_done_cb))
709 			drm_sched_job_done(s_job, fence->error ?: errno);
710 	}
711 
712 	drm_sched_start_timeout_unlocked(sched);
713 	drm_sched_wqueue_start(sched);
714 }
715 EXPORT_SYMBOL(drm_sched_start);
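/*
 * An illustrative sketch only (the "foo_*" names are hypothetical) of how a
 * &struct drm_sched_backend_ops.timedout_job callback typically brackets a
 * hardware reset with drm_sched_stop() and drm_sched_start():
 *
 *	static enum drm_gpu_sched_stat
 *	foo_timedout_job(struct drm_sched_job *job)
 *	{
 *		struct foo_device *foo = to_foo_device(job->sched);
 *
 *		drm_sched_stop(job->sched, job);
 *		foo_hw_reset(foo);
 *		drm_sched_start(job->sched, -ECANCELED);
 *
 *		return DRM_GPU_SCHED_STAT_RESET;
 *	}
 */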
716 
717 /**
718  * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
719  *
720  * @sched: scheduler instance
721  *
722  * Re-submitting jobs was a concept AMD came up with as a cheap way to implement
723  * recovery after a job timeout.
724  *
725  * This turned out to not work very well. First of all, there are many
726  * problems with the dma_fence implementation and requirements. Either the
727  * implementation risks deadlocks with core memory management or violates
728  * documented implementation details of the dma_fence object.
729  *
730  * Drivers can still save and restore their state for recovery operations, but
731  * we shouldn't make this a general scheduler feature around the dma_fence
732  * interface. The suggested driver-side replacement is to use
733  * drm_sched_for_each_pending_job() after stopping the scheduler and implement
734  * their own recovery operations.
735  */
736 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
737 {
738 	struct drm_sched_job *s_job, *tmp;
739 	uint64_t guilty_context;
740 	bool found_guilty = false;
741 	struct dma_fence *fence;
742 
743 	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
744 		struct drm_sched_fence *s_fence = s_job->s_fence;
745 
746 		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
747 			found_guilty = true;
748 			guilty_context = s_job->s_fence->scheduled.context;
749 		}
750 
751 		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
752 			dma_fence_set_error(&s_fence->finished, -ECANCELED);
753 
754 		fence = sched->ops->run_job(s_job);
755 
756 		if (IS_ERR_OR_NULL(fence)) {
757 			if (IS_ERR(fence))
758 				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
759 
760 			s_job->s_fence->parent = NULL;
761 		} else {
762 
763 			s_job->s_fence->parent = dma_fence_get(fence);
764 
765 			/* Drop for original kref_init */
766 			dma_fence_put(fence);
767 		}
768 	}
769 }
770 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
771 
772 /**
773  * drm_sched_job_init - init a scheduler job
774  * @job: scheduler job to init
775  * @entity: scheduler entity to use
776  * @credits: the number of credits this job contributes to the scheduler's
777  * credit limit
778  * @owner: job owner for debugging
779  * @drm_client_id: &struct drm_file.client_id of the owner (used by trace
780  * events)
781  *
782  * Refer to drm_sched_entity_push_job() documentation
783  * for locking considerations.
784  *
785  * Drivers must make sure to call drm_sched_job_cleanup() if this function returns
786  * successfully, even when @job is aborted before drm_sched_job_arm() is called.
787  *
788  * Note that this function does not assign a valid value to each struct member
789  * of struct drm_sched_job. Take a look at that struct's documentation to see
790  * who sets which struct member with what lifetime.
791  *
792  * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
793  * has died, which can mean that there's no valid runqueue for an @entity.
794  * This function returns -ENOENT in this case (which probably should be -EIO as
795  * a more meaningful return value).
796  *
797  * Returns 0 for success, negative error code otherwise.
798  */
799 int drm_sched_job_init(struct drm_sched_job *job,
800 		       struct drm_sched_entity *entity,
801 		       u32 credits, void *owner,
802 		       uint64_t drm_client_id)
803 {
804 	if (!entity->rq) {
805 		/* This will most likely be followed by missing frames
806 		 * or worse--a blank screen--leave a trail in the
807 		 * logs, so this can be debugged more easily.
808 		 */
809 		dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__);
810 		return -ENOENT;
811 	}
812 
813 	if (unlikely(!credits)) {
814 		pr_err("*ERROR* %s: credits cannot be 0!\n", __func__);
815 		return -EINVAL;
816 	}
817 
818 	/*
819 	 * We don't know for sure how the user has allocated the job. Thus, zero
820 	 * the struct so that disallowed (i.e., too early) usage of pointers that
821 	 * this function does not set is guaranteed to lead to a NULL pointer
822 	 * exception instead of UB.
823 	 */
824 	memset(job, 0, sizeof(*job));
825 
826 	job->entity = entity;
827 	job->credits = credits;
828 	job->s_fence = drm_sched_fence_alloc(entity, owner, drm_client_id);
829 	if (!job->s_fence)
830 		return -ENOMEM;
831 
832 	INIT_LIST_HEAD(&job->list);
833 
834 	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
835 
836 	return 0;
837 }
838 EXPORT_SYMBOL(drm_sched_job_init);
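/*
 * A minimal submission sketch (illustrative only; the "foo_*" helpers are
 * hypothetical) showing the init -> arm -> push sequence and the cleanup
 * required when aborting before drm_sched_job_arm():
 *
 *	ret = drm_sched_job_init(&job->base, entity, 1, foo, drm_client_id);
 *	if (ret)
 *		return ret;
 *
 *	ret = foo_add_dependencies(job);
 *	if (ret) {
 *		drm_sched_job_cleanup(&job->base);
 *		return ret;
 *	}
 *
 *	drm_sched_job_arm(&job->base);
 *	fence = dma_fence_get(&job->base.s_fence->finished);
 *	drm_sched_entity_push_job(&job->base);
 */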
839 
840 /**
841  * drm_sched_job_arm - arm a scheduler job for execution
842  * @job: scheduler job to arm
843  *
844  * This arms a scheduler job for execution. Specifically it initializes the
845  * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
846  * or other places that need to track the completion of this job. It also
847  * initializes sequence numbers, which are fundamental for fence ordering.
848  *
849  * Refer to drm_sched_entity_push_job() documentation for locking
850  * considerations.
851  *
852  * Once this function has been called, you *must* submit @job with
853  * drm_sched_entity_push_job().
854  *
855  * This can only be called if drm_sched_job_init() succeeded.
856  */
857 void drm_sched_job_arm(struct drm_sched_job *job)
858 {
859 	struct drm_gpu_scheduler *sched;
860 	struct drm_sched_entity *entity = job->entity;
861 
862 	BUG_ON(!entity);
863 	drm_sched_entity_select_rq(entity);
864 	sched = entity->rq->sched;
865 
866 	job->sched = sched;
867 	job->s_priority = entity->priority;
868 
869 	drm_sched_fence_init(job->s_fence, job->entity);
870 }
871 EXPORT_SYMBOL(drm_sched_job_arm);
872 
873 /**
874  * drm_sched_job_add_dependency - adds the fence as a job dependency
875  * @job: scheduler job to add the dependencies to
876  * @fence: the dma_fence to add to the list of dependencies.
877  *
878  * Note that @fence is consumed in both the success and error cases.
879  *
880  * Returns:
881  * 0 on success, or an error on failing to expand the array.
882  */
883 int drm_sched_job_add_dependency(struct drm_sched_job *job,
884 				 struct dma_fence *fence)
885 {
886 	struct dma_fence *entry;
887 	unsigned long index;
888 	u32 id = 0;
889 	int ret;
890 
891 	if (!fence)
892 		return 0;
893 
894 	/* Deduplicate if we already depend on a fence from the same context.
895 	 * This lets the size of the array of deps scale with the number of
896 	 * engines involved, rather than the number of BOs.
897 	 */
898 	xa_for_each(&job->dependencies, index, entry) {
899 		if (entry->context != fence->context)
900 			continue;
901 
902 		if (dma_fence_is_later(fence, entry)) {
903 			dma_fence_put(entry);
904 			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
905 		} else {
906 			dma_fence_put(fence);
907 		}
908 		return 0;
909 	}
910 
911 	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
912 	if (ret != 0)
913 		dma_fence_put(fence);
914 
915 	return ret;
916 }
917 EXPORT_SYMBOL(drm_sched_job_add_dependency);
918 
919 /**
920  * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
921  * @job: scheduler job to add the dependencies to
922  * @file: drm file private pointer
923  * @handle: syncobj handle to lookup
924  * @point: timeline point
925  *
926  * This adds the fence matching the given syncobj to @job.
927  *
928  * Returns:
929  * 0 on success, or an error on failing to expand the array.
930  */
931 int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
932 					 struct drm_file *file,
933 					 u32 handle,
934 					 u32 point)
935 {
936 	struct dma_fence *fence;
937 	int ret;
938 
939 	ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
940 	if (ret)
941 		return ret;
942 
943 	return drm_sched_job_add_dependency(job, fence);
944 }
945 EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);
946 
947 /**
948  * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
949  * @job: scheduler job to add the dependencies to
950  * @resv: the dma_resv object to get the fences from
951  * @usage: the dma_resv_usage to use to filter the fences
952  *
953  * This adds all fences matching the given usage from @resv to @job.
954  * Must be called with the @resv lock held.
955  *
956  * Returns:
957  * 0 on success, or an error on failing to expand the array.
958  */
959 int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
960 					struct dma_resv *resv,
961 					enum dma_resv_usage usage)
962 {
963 	struct dma_resv_iter cursor;
964 	struct dma_fence *fence;
965 	int ret;
966 
967 	dma_resv_assert_held(resv);
968 
969 	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
970 		/*
971 		 * As drm_sched_job_add_dependency always consumes the fence
972 		 * reference (even when it fails), and dma_resv_for_each_fence
973 		 * is not obtaining one, we need to grab one before calling.
974 		 */
975 		ret = drm_sched_job_add_dependency(job, dma_fence_get(fence));
976 		if (ret)
977 			return ret;
978 	}
979 	return 0;
980 }
981 EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);
982 
983 /**
984  * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
985  *   dependencies
986  * @job: scheduler job to add the dependencies to
987  * @obj: the gem object to add new dependencies from.
988  * @write: whether the job might write the object (so we need to depend on
989  * shared fences in the reservation object).
990  *
991  * This should be called after drm_gem_lock_reservations() on your array of
992  * GEM objects used in the job but before updating the reservations with your
993  * own fences.
994  *
995  * Returns:
996  * 0 on success, or an error on failing to expand the array.
997  */
998 int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
999 					    struct drm_gem_object *obj,
1000 					    bool write)
1001 {
1002 	return drm_sched_job_add_resv_dependencies(job, obj->resv,
1003 						   dma_resv_usage_rw(write));
1004 }
1005 EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
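/*
 * An illustrative sketch (error handling trimmed; "job" and "obj" are
 * hypothetical driver objects) of pulling implicit dependencies from a GEM
 * object between drm_sched_job_init() and drm_sched_job_arm(), with the
 * reservation lock held as required:
 *
 *	dma_resv_lock(obj->resv, NULL);
 *	ret = drm_sched_job_add_implicit_dependencies(&job->base, obj, true);
 *	if (!ret)
 *		ret = dma_resv_reserve_fences(obj->resv, 1);
 *	dma_resv_unlock(obj->resv);
 */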
1006 
1007 /**
1008  * drm_sched_job_has_dependency - check whether fence is the job's dependency
1009  * @job: scheduler job to check
1010  * @fence: fence to look for
1011  *
1012  * Returns:
1013  * True if @fence is found within the job's dependencies, or otherwise false.
1014  */
1015 bool drm_sched_job_has_dependency(struct drm_sched_job *job,
1016 				  struct dma_fence *fence)
1017 {
1018 	struct dma_fence *f;
1019 	unsigned long index;
1020 
1021 	xa_for_each(&job->dependencies, index, f) {
1022 		if (f == fence)
1023 			return true;
1024 	}
1025 
1026 	return false;
1027 }
1028 EXPORT_SYMBOL(drm_sched_job_has_dependency);
1029 
1030 /**
1031  * drm_sched_job_cleanup - clean up scheduler job resources
1032  * @job: scheduler job to clean up
1033  *
1034  * Cleans up the resources allocated with drm_sched_job_init().
1035  *
1036  * Drivers should call this from their error unwind code if @job is aborted
1037  * before drm_sched_job_arm() is called.
1038  *
1039  * drm_sched_job_arm() is a point of no return since it initializes the fences
1040  * and their sequence number etc. Once that function has been called, you *must*
1041  * submit it with drm_sched_entity_push_job() and cannot simply abort it by
1042  * calling drm_sched_job_cleanup().
1043  *
1044  * This function should be called in the &drm_sched_backend_ops.free_job callback.
1045  */
1046 void drm_sched_job_cleanup(struct drm_sched_job *job)
1047 {
1048 	struct dma_fence *fence;
1049 	unsigned long index;
1050 
1051 	if (kref_read(&job->s_fence->finished.refcount)) {
1052 		/* The job has been processed by the scheduler, i.e.,
1053 		 * drm_sched_job_arm() and drm_sched_entity_push_job() have
1054 		 * been called.
1055 		 */
1056 		dma_fence_put(&job->s_fence->finished);
1057 	} else {
1058 		/* The job was aborted before it has been committed to be run;
1059 		 * notably, drm_sched_job_arm() has not been called.
1060 		 */
1061 		drm_sched_fence_free(job->s_fence);
1062 	}
1063 
1064 	job->s_fence = NULL;
1065 
1066 	xa_for_each(&job->dependencies, index, fence) {
1067 		dma_fence_put(fence);
1068 	}
1069 	xa_destroy(&job->dependencies);
1070 
1071 }
1072 EXPORT_SYMBOL(drm_sched_job_cleanup);
1073 
1074 /**
1075  * drm_sched_wakeup - Wake up the scheduler if it is ready to queue
1076  * @sched: scheduler instance
1077  *
1078  * Wake up the scheduler if we can queue jobs.
1079  */
1080 void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
1081 {
1082 	drm_sched_run_job_queue(sched);
1083 }
1084 
1085 /**
1086  * drm_sched_select_entity - Select next entity to process
1087  *
1088  * @sched: scheduler instance
1089  *
1090  * Return an entity to process or NULL if none are found.
1091  *
1092  * Note that we break out of the for-loop when "entity" is non-NULL, which can
1093  * also be an error pointer--this ensures we don't process lower priority
1094  * run-queues. See the comments in the respective called functions.
1095  */
1096 static struct drm_sched_entity *
1097 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
1098 {
1099 	struct drm_sched_entity *entity;
1100 	int i;
1101 
1102 	/* Start with the highest priority.
1103 	 */
1104 	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1105 		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
1106 			drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
1107 			drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
1108 		if (entity)
1109 			break;
1110 	}
1111 
1112 	return IS_ERR(entity) ? NULL : entity;
1113 }
1114 
1115 /**
1116  * drm_sched_get_finished_job - fetch the next finished job to be destroyed
1117  *
1118  * @sched: scheduler instance
1119  * @have_more: are there more finished jobs on the list
1120  *
1121  * Informs the caller through @have_more whether there are more finished jobs
1122  * besides the returned one.
1123  *
1124  * Returns the next finished job from the pending list (if there is one)
1125  * ready to be destroyed.
1126  */
1127 static struct drm_sched_job *
1128 drm_sched_get_finished_job(struct drm_gpu_scheduler *sched, bool *have_more)
1129 {
1130 	struct drm_sched_job *job, *next;
1131 
1132 	spin_lock(&sched->job_list_lock);
1133 
1134 	job = list_first_entry_or_null(&sched->pending_list,
1135 				       struct drm_sched_job, list);
1136 	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
1137 		/* remove job from pending_list */
1138 		list_del_init(&job->list);
1139 
1140 		/* cancel this job's TO timer */
1141 		cancel_delayed_work(&sched->work_tdr);
1142 
1143 		*have_more = false;
1144 		next = list_first_entry_or_null(&sched->pending_list,
1145 						typeof(*next), list);
1146 		if (next) {
1147 			/* make the scheduled timestamp more accurate */
1148 			if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
1149 				     &next->s_fence->scheduled.flags))
1150 				next->s_fence->scheduled.timestamp =
1151 					dma_fence_timestamp(&job->s_fence->finished);
1152 
1153 			*have_more = dma_fence_is_signaled(&next->s_fence->finished);
1154 
1155 			/* start TO timer for next job */
1156 			drm_sched_start_timeout(sched);
1157 		}
1158 	} else {
1159 		job = NULL;
1160 	}
1161 
1162 	spin_unlock(&sched->job_list_lock);
1163 
1164 	return job;
1165 }
1166 
1167 /**
1168  * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
1169  * @sched_list: list of drm_gpu_schedulers
1170  * @num_sched_list: number of drm_gpu_schedulers in the sched_list
1171  *
1172  * Returns a pointer to the sched with the least load, or NULL if none of the
1173  * drm_gpu_schedulers are ready.
1174  */
1175 struct drm_gpu_scheduler *
1176 drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
1177 		     unsigned int num_sched_list)
1178 {
1179 	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
1180 	int i;
1181 	unsigned int min_score = UINT_MAX, num_score;
1182 
1183 	for (i = 0; i < num_sched_list; ++i) {
1184 		sched = sched_list[i];
1185 
1186 		if (!sched->ready) {
1187 			DRM_WARN("scheduler %s is not ready, skipping",
1188 				 sched->name);
1189 			continue;
1190 		}
1191 
1192 		num_score = atomic_read(sched->score);
1193 		if (num_score < min_score) {
1194 			min_score = num_score;
1195 			picked_sched = sched;
1196 		}
1197 	}
1198 
1199 	return picked_sched;
1200 }
1201 EXPORT_SYMBOL(drm_sched_pick_best);
1202 
1203 /**
1204  * drm_sched_free_job_work - worker to call free_job
1205  *
1206  * @w: free job work
1207  */
1208 static void drm_sched_free_job_work(struct work_struct *w)
1209 {
1210 	struct drm_gpu_scheduler *sched =
1211 		container_of(w, struct drm_gpu_scheduler, work_free_job);
1212 	struct drm_sched_job *job;
1213 	bool have_more;
1214 
1215 	job = drm_sched_get_finished_job(sched, &have_more);
1216 	if (job) {
1217 		sched->ops->free_job(job);
1218 		if (have_more)
1219 			drm_sched_run_free_queue(sched);
1220 	}
1221 
1222 	drm_sched_run_job_queue(sched);
1223 }
1224 
1225 /**
1226  * drm_sched_run_job_work - worker to call run_job
1227  *
1228  * @w: run job work
1229  */
1230 static void drm_sched_run_job_work(struct work_struct *w)
1231 {
1232 	struct drm_gpu_scheduler *sched =
1233 		container_of(w, struct drm_gpu_scheduler, work_run_job);
1234 	struct drm_sched_entity *entity;
1235 	struct dma_fence *fence;
1236 	struct drm_sched_fence *s_fence;
1237 	struct drm_sched_job *sched_job;
1238 	int r;
1239 
1240 	/* Find entity with a ready job */
1241 	entity = drm_sched_select_entity(sched);
1242 	if (!entity) {
1243 		/*
1244 		 * Either no more work to do, or the next ready job needs more
1245 		 * credits than the scheduler has currently available.
1246 		 */
1247 		return;
1248 	}
1249 
1250 	sched_job = drm_sched_entity_pop_job(entity);
1251 	if (!sched_job) {
1252 		complete_all(&entity->entity_idle);
1253 		drm_sched_run_job_queue(sched);
1254 		return;
1255 	}
1256 
1257 	s_fence = sched_job->s_fence;
1258 
1259 	atomic_add(sched_job->credits, &sched->credit_count);
1260 	drm_sched_job_begin(sched_job);
1261 
1262 	trace_drm_sched_job_run(sched_job, entity);
1263 	/*
1264 	 * The run_job() callback must by definition return a fence whose
1265 	 * refcount has been incremented for the scheduler already.
1266 	 */
1267 	fence = sched->ops->run_job(sched_job);
1268 	complete_all(&entity->entity_idle);
1269 	drm_sched_fence_scheduled(s_fence, fence);
1270 
1271 	if (!IS_ERR_OR_NULL(fence)) {
1272 		r = dma_fence_add_callback(fence, &sched_job->cb,
1273 					   drm_sched_job_done_cb);
1274 		if (r == -ENOENT)
1275 			drm_sched_job_done(sched_job, fence->error);
1276 		else if (r)
1277 			DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
1278 
1279 		dma_fence_put(fence);
1280 	} else {
1281 		drm_sched_job_done(sched_job, IS_ERR(fence) ?
1282 				   PTR_ERR(fence) : 0);
1283 	}
1284 
1285 	wake_up(&sched->job_scheduled);
1286 	drm_sched_run_job_queue(sched);
1287 }
1288 
1289 static struct workqueue_struct *drm_sched_alloc_wq(const char *name)
1290 {
1291 #if (IS_ENABLED(CONFIG_LOCKDEP))
1292 	static struct lockdep_map map = {
1293 		.name = "drm_sched_lockdep_map"
1294 	};
1295 
1296 	/*
1297 	 * Avoid leaking a lockdep map on each drm sched creation and
1298 	 * destruction by using a single lockdep map for all drm sched
1299 	 * allocated submit_wq.
1300 	 */
1301 
1302 	return alloc_ordered_workqueue_lockdep_map(name, WQ_MEM_RECLAIM, &map);
1303 #else
1304 	return alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
1305 #endif
1306 }
1307 
1308 /**
1309  * drm_sched_init - Init a gpu scheduler instance
1310  *
1311  * @sched: scheduler instance
1312  * @args: scheduler initialization arguments
1313  *
1314  * Return 0 on success, otherwise error code.
1315  */
1316 int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_args *args)
1317 {
1318 	int i;
1319 
1320 	sched->ops = args->ops;
1321 	sched->credit_limit = args->credit_limit;
1322 	sched->name = args->name;
1323 	sched->timeout = args->timeout;
1324 	sched->hang_limit = args->hang_limit;
1325 	sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_percpu_wq;
1326 	sched->score = args->score ? args->score : &sched->_score;
1327 	sched->dev = args->dev;
1328 
1329 	if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) {
1330 		/* This is a gross violation--tell drivers what the problem is.
1331 		 */
1332 		dev_err(sched->dev, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
1333 			__func__);
1334 		return -EINVAL;
1335 	} else if (sched->sched_rq) {
1336 		/* Not an error, but warn anyway so drivers can
1337 		 * fine-tune their DRM calling order, and return as if
1338 		 * all is good.
1339 		 */
1340 		dev_warn(sched->dev, "%s: scheduler already initialized!\n", __func__);
1341 		return 0;
1342 	}
1343 
1344 	if (args->submit_wq) {
1345 		sched->submit_wq = args->submit_wq;
1346 		sched->own_submit_wq = false;
1347 	} else {
1348 		sched->submit_wq = drm_sched_alloc_wq(args->name);
1349 		if (!sched->submit_wq)
1350 			return -ENOMEM;
1351 
1352 		sched->own_submit_wq = true;
1353 	}
1354 
1355 	sched->sched_rq = kmalloc_array(args->num_rqs, sizeof(*sched->sched_rq),
1356 					GFP_KERNEL | __GFP_ZERO);
1357 	if (!sched->sched_rq)
1358 		goto Out_check_own;
1359 	sched->num_rqs = args->num_rqs;
1360 	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1361 		sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
1362 		if (!sched->sched_rq[i])
1363 			goto Out_unroll;
1364 		drm_sched_rq_init(sched, sched->sched_rq[i]);
1365 	}
1366 
1367 	init_waitqueue_head(&sched->job_scheduled);
1368 	INIT_LIST_HEAD(&sched->pending_list);
1369 	spin_lock_init(&sched->job_list_lock);
1370 	atomic_set(&sched->credit_count, 0);
1371 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
1372 	INIT_WORK(&sched->work_run_job, drm_sched_run_job_work);
1373 	INIT_WORK(&sched->work_free_job, drm_sched_free_job_work);
1374 	atomic_set(&sched->_score, 0);
1375 	atomic64_set(&sched->job_id_count, 0);
1376 	sched->pause_submit = false;
1377 
1378 	sched->ready = true;
1379 	return 0;
1380 Out_unroll:
1381 	for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
1382 		kfree(sched->sched_rq[i]);
1383 
1384 	kfree(sched->sched_rq);
1385 	sched->sched_rq = NULL;
1386 Out_check_own:
1387 	if (sched->own_submit_wq)
1388 		destroy_workqueue(sched->submit_wq);
1389 	dev_err(sched->dev, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
1390 	return -ENOMEM;
1391 }
1392 EXPORT_SYMBOL(drm_sched_init);
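/*
 * A minimal sketch of scheduler creation (illustrative only; the "foo_*"
 * names and the numbers are made up) showing the arguments consumed above:
 *
 *	const struct drm_sched_init_args args = {
 *		.ops		= &foo_sched_ops,
 *		.submit_wq	= NULL,
 *		.num_rqs	= DRM_SCHED_PRIORITY_COUNT,
 *		.credit_limit	= 64,
 *		.hang_limit	= 0,
 *		.timeout	= msecs_to_jiffies(500),
 *		.name		= "foo_ring0",
 *		.dev		= foo->drm.dev,
 *	};
 *
 *	ret = drm_sched_init(&foo->sched, &args);
 *
 * Leaving .submit_wq at NULL makes the scheduler allocate its own ordered
 * workqueue, as done in drm_sched_alloc_wq() above.
 */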
1393 
1394 static void drm_sched_cancel_remaining_jobs(struct drm_gpu_scheduler *sched)
1395 {
1396 	struct drm_sched_job *job, *tmp;
1397 
1398 	/* All other accessors are stopped. No locking necessary. */
1399 	list_for_each_entry_safe_reverse(job, tmp, &sched->pending_list, list) {
1400 		sched->ops->cancel_job(job);
1401 		list_del(&job->list);
1402 		sched->ops->free_job(job);
1403 	}
1404 }
1405 
1406 /**
1407  * drm_sched_fini - Destroy a gpu scheduler
1408  *
1409  * @sched: scheduler instance
1410  *
1411  * Tears down and cleans up the scheduler.
1412  *
1413  * This stops submission of new jobs to the hardware through &struct
1414  * drm_sched_backend_ops.run_job. If &struct drm_sched_backend_ops.cancel_job
1415  * is implemented, all jobs will be canceled through it and afterwards cleaned
1416  * up through &struct drm_sched_backend_ops.free_job. If cancel_job is not
1417  * implemented, memory could leak.
1418  */
1419 void drm_sched_fini(struct drm_gpu_scheduler *sched)
1420 {
1421 	struct drm_sched_entity *s_entity;
1422 	int i;
1423 
1424 	drm_sched_wqueue_stop(sched);
1425 
1426 	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1427 		struct drm_sched_rq *rq = sched->sched_rq[i];
1428 
1429 		spin_lock(&rq->lock);
1430 		list_for_each_entry(s_entity, &rq->entities, list) {
1431 			/*
1432 			 * Prevents reinsertion and marks job_queue as idle;
1433 			 * it will be removed from the rq in drm_sched_entity_fini()
1434 			 * eventually.
1435 			 *
1436 			 * FIXME:
1437 			 * This lacks the proper spin_lock(&s_entity->lock) and
1438 			 * is, therefore, a race condition. Most notably, it
1439 			 * can race with drm_sched_entity_push_job(). The lock
1440 			 * cannot be taken here, however, because this would
1441 			 * lead to lock inversion -> deadlock.
1442 			 *
1443 			 * The best solution probably is to enforce the
1444 			 * lifetime rule of all entities having to be torn down
1445 			 * before their scheduler. Then, however, locking could
1446 			 * be dropped altogether from this function.
1447 			 *
1448 			 * For now, this remains a potential race in all
1449 			 * drivers that keep entities alive for longer than
1450 			 * the scheduler.
1451 			 *
1452 			 * The READ_ONCE() is there to make the lockless read
1453 			 * (warning about the lockless write below) slightly
1454 			 * less broken...
1455 			 */
1456 			if (!READ_ONCE(s_entity->stopped))
1457 				dev_warn(sched->dev, "Tearing down scheduler with active entities!\n");
1458 			s_entity->stopped = true;
1459 		}
1460 		spin_unlock(&rq->lock);
1461 		kfree(sched->sched_rq[i]);
1462 	}
1463 
1464 	/* Wake up everyone stuck in drm_sched_entity_flush for this scheduler */
1465 	wake_up_all(&sched->job_scheduled);
1466 
1467 	/* Confirm no work left behind accessing device structures */
1468 	cancel_delayed_work_sync(&sched->work_tdr);
1469 
1470 	/* Avoid memory leaks if supported by the driver. */
1471 	if (sched->ops->cancel_job)
1472 		drm_sched_cancel_remaining_jobs(sched);
1473 
1474 	if (sched->own_submit_wq)
1475 		destroy_workqueue(sched->submit_wq);
1476 	sched->ready = false;
1477 	kfree(sched->sched_rq);
1478 	sched->sched_rq = NULL;
1479 
1480 	if (!list_empty(&sched->pending_list))
1481 		dev_warn(sched->dev, "Tearing down scheduler while jobs are pending!\n");
1482 }
1483 EXPORT_SYMBOL(drm_sched_fini);
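/*
 * The expected teardown order, sketched with hypothetical "foo_*" names:
 * entities must be torn down before the scheduler they were pushed to, and
 * only then is the scheduler itself destroyed.
 *
 *	drm_sched_entity_destroy(&foo_ctx->entity);
 *	...
 *	drm_sched_fini(&foo->sched);
 */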
1484 
1485 /**
1486  * drm_sched_increase_karma - Update sched_entity guilty flag
1487  *
1488  * @bad: The job guilty of time out
1489  *
1490  * Increment on every hang caused by the 'bad' job. If this exceeds the hang
1491  * limit of the scheduler, then the respective sched entity is marked guilty and
1492  * jobs from it will not be scheduled further.
1493  */
1494 void drm_sched_increase_karma(struct drm_sched_job *bad)
1495 {
1496 	int i;
1497 	struct drm_sched_entity *tmp;
1498 	struct drm_sched_entity *entity;
1499 	struct drm_gpu_scheduler *sched = bad->sched;
1500 
1501 	/* Don't change @bad's karma if it's from the KERNEL RQ,
1502 	 * because sometimes a GPU hang would cause kernel jobs (like VM updating jobs)
1503 	 * to be corrupted, but keep in mind that kernel jobs are always considered good.
1504 	 */
1505 	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
1506 		atomic_inc(&bad->karma);
1507 
1508 		for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
1509 			struct drm_sched_rq *rq = sched->sched_rq[i];
1510 
1511 			spin_lock(&rq->lock);
1512 			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
1513 				if (bad->s_fence->scheduled.context ==
1514 				    entity->fence_context) {
1515 					if (entity->guilty)
1516 						atomic_set(entity->guilty, 1);
1517 					break;
1518 				}
1519 			}
1520 			spin_unlock(&rq->lock);
1521 			if (&entity->list != &rq->entities)
1522 				break;
1523 		}
1524 	}
1525 }
1526 EXPORT_SYMBOL(drm_sched_increase_karma);
1527 
1528 /**
1529  * drm_sched_wqueue_ready - Is the scheduler ready for submission
1530  *
1531  * @sched: scheduler instance
1532  *
1533  * Returns true if submission is ready
1534  */
1535 bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched)
1536 {
1537 	return sched->ready;
1538 }
1539 EXPORT_SYMBOL(drm_sched_wqueue_ready);
1540 
1541 /**
1542  * drm_sched_wqueue_stop - stop scheduler submission
1543  * @sched: scheduler instance
1544  *
1545  * Stops the scheduler from pulling new jobs from entities. It also stops
1546  * freeing jobs automatically through drm_sched_backend_ops.free_job().
1547  */
1548 void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
1549 {
1550 	WRITE_ONCE(sched->pause_submit, true);
1551 	cancel_work_sync(&sched->work_run_job);
1552 	cancel_work_sync(&sched->work_free_job);
1553 }
1554 EXPORT_SYMBOL(drm_sched_wqueue_stop);
1555 
1556 /**
1557  * drm_sched_wqueue_start - start scheduler submission
1558  * @sched: scheduler instance
1559  *
1560  * Restarts the scheduler after drm_sched_wqueue_stop() has stopped it.
1561  *
1562  * This function is not necessary for 'conventional' startup. The scheduler is
1563  * fully operational after drm_sched_init() succeeded.
1564  */
1565 void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
1566 {
1567 	WRITE_ONCE(sched->pause_submit, false);
1568 	queue_work(sched->submit_wq, &sched->work_run_job);
1569 	queue_work(sched->submit_wq, &sched->work_free_job);
1570 }
1571 EXPORT_SYMBOL(drm_sched_wqueue_start);
1572 
1573 /**
1574  * drm_sched_is_stopped() - Checks whether drm_sched is stopped
1575  * @sched: DRM scheduler
1576  *
1577  * Return: true if sched is stopped, false otherwise
1578  */
1579 bool drm_sched_is_stopped(struct drm_gpu_scheduler *sched)
1580 {
1581 	return READ_ONCE(sched->pause_submit);
1582 }
1583 EXPORT_SYMBOL(drm_sched_is_stopped);
1584 
1585 /**
1586  * drm_sched_job_is_signaled() - DRM scheduler job is signaled
1587  * @job: DRM scheduler job
1588  *
1589  * Determine if a DRM scheduler job is signaled. The DRM scheduler should be
1590  * stopped to obtain a stable snapshot of state. Both the parent fence (hardware
1591  * fence) and the finished fence (software fence) are checked for signaling.
1592  *
1593  * Return: true if job is signaled, false otherwise
1594  */
1595 bool drm_sched_job_is_signaled(struct drm_sched_job *job)
1596 {
1597 	struct drm_sched_fence *s_fence = job->s_fence;
1598 
1599 	WARN_ON(!drm_sched_is_stopped(job->sched));
1600 	return (s_fence->parent && dma_fence_is_signaled(s_fence->parent)) ||
1601 		dma_fence_is_signaled(&s_fence->finished);
1602 }
1603 EXPORT_SYMBOL(drm_sched_job_is_signaled);
1604