/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

/**
 * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
 *
 * Setting this flag on a scheduler fence prevents pipelining of jobs depending
 * on this fence. In other words, we always insert a full CPU round trip before
 * dependent jobs are pushed to the hw queue.
 */
#define DRM_SCHED_FENCE_DONT_PIPELINE	DMA_FENCE_FLAG_USER_BITS

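/*
 * For example, a driver can request this behavior by setting the bit in the
 * flags of a &struct dma_fence before publishing it (an illustrative sketch;
 * "fence" is a hypothetical fence the driver owns):
 *
 *	set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags);
 */
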
/**
 * DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT - A fence deadline hint has been set
 *
 * Because a deadline hint can be set before the backing hw fence is created,
 * we need to keep track of whether a deadline has already been set.
 */
#define DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT	(DMA_FENCE_FLAG_USER_BITS + 1)

enum dma_resv_usage;
struct dma_resv;
struct drm_gem_object;

struct drm_gpu_scheduler;
struct drm_sched_rq;

struct drm_file;

/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_HIGH,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_LOW,

	DRM_SCHED_PRIORITY_COUNT
};

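/*
 * Because the priorities start at 0 and are densely packed, they can be used
 * directly as indices into per-priority arrays, e.g. (a sketch; "sched" is a
 * hypothetical &struct drm_gpu_scheduler pointer):
 *
 *	struct drm_sched_rq *rq = sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
 */
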
/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * Entities will emit jobs, in order, to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	/**
	 * @list:
	 *
	 * Used to append this struct to the list of entities in the runqueue
	 * @rq under &drm_sched_rq.entities.
	 *
	 * Protected by &drm_sched_rq.lock of @rq.
	 */
	struct list_head		list;

	/**
	 * @lock:
	 *
	 * Lock protecting the run-queue (@rq) to which this entity belongs,
	 * @priority and the list of schedulers (@sched_list, @num_sched_list).
	 */
	spinlock_t			lock;

	/**
	 * @rq:
	 *
	 * Runqueue on which this entity is currently scheduled.
	 *
	 * FIXME: Locking is very unclear for this. Writers are protected by
	 * @lock, but readers are generally lockless and seem to just race with
	 * not even a READ_ONCE.
	 */
	struct drm_sched_rq		*rq;

	/**
	 * @sched_list:
	 *
	 * A list of schedulers (struct drm_gpu_scheduler).  Jobs from this
	 * entity can be scheduled on any scheduler on this list.
	 *
	 * This can be modified by calling drm_sched_entity_modify_sched().
	 * Locking is entirely up to the driver, see the above function for
	 * more details.
	 *
	 * This will be set to NULL if @num_sched_list equals 1 and @rq has
	 * been set already.
	 *
	 * FIXME: This means priority changes through
	 * drm_sched_entity_set_priority() will be lost henceforth in this case.
	 */
	struct drm_gpu_scheduler        **sched_list;

	/**
	 * @num_sched_list:
	 *
	 * Number of drm_gpu_schedulers in the @sched_list.
	 */
	unsigned int                    num_sched_list;

	/**
	 * @priority:
	 *
	 * Priority of the entity. This can be modified by calling
	 * drm_sched_entity_set_priority(). Protected by @lock.
	 */
	enum drm_sched_priority         priority;

	/**
	 * @job_queue: the list of jobs of this entity.
	 */
	struct spsc_queue		job_queue;

	/**
	 * @fence_seq:
	 *
	 * A linearly increasing seqno incremented with each new
	 * &drm_sched_fence which is part of the entity.
	 *
	 * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking,
	 * this doesn't need to be atomic.
	 */
	atomic_t			fence_seq;

	/**
	 * @fence_context:
	 *
	 * A unique context for all the fences which belong to this entity.  The
	 * &drm_sched_fence.scheduled uses the fence_context but
	 * &drm_sched_fence.finished uses fence_context + 1.
	 */
	uint64_t			fence_context;

	/**
	 * @dependency:
	 *
	 * The dependency fence of the job which is at the top of the job
	 * queue.
	 */
	struct dma_fence		*dependency;

	/**
	 * @cb:
	 *
	 * Callback for the dependency fence above.
	 */
	struct dma_fence_cb		cb;

	/**
	 * @guilty:
	 *
	 * Points to the guilty atomic shared with this entity's context. It is
	 * set by drm_sched_increase_karma() once a job from this entity has
	 * exceeded the scheduler's hang limit.
	 */
	atomic_t			*guilty;

	/**
	 * @last_scheduled:
	 *
	 * Points to the finished fence of the last scheduled job. Only written
	 * by drm_sched_entity_pop_job(). Can be accessed locklessly from
	 * drm_sched_job_arm() if the queue is empty.
	 */
	struct dma_fence __rcu		*last_scheduled;

	/**
	 * @last_user: last group leader pushing a job into the entity.
	 */
	struct task_struct		*last_user;

	/**
	 * @stopped:
	 *
	 * Marks the entity as removed from the run queue and destined for
	 * termination. This is set by calling drm_sched_entity_flush() and by
	 * drm_sched_fini().
	 */
	bool				stopped;

	/**
	 * @entity_idle:
	 *
	 * Signals when the entity is not in use, used to sequence entity
	 * cleanup in drm_sched_entity_fini().
	 */
	struct completion		entity_idle;

	/**
	 * @oldest_job_waiting:
	 *
	 * Submission time of the oldest job waiting in the software queue.
	 * Used as the key for ordering entities in the time-based priority
	 * queue.
	 */
	ktime_t				oldest_job_waiting;

	/**
	 * @rb_tree_node:
	 *
	 * The node used to insert this entity into the time-based priority
	 * queue used for FIFO scheduling.
	 */
	struct rb_node			rb_tree_node;
};

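/*
 * A driver typically embeds an entity in a per-file or per-context object and
 * initializes it against one or more schedulers. A minimal sketch, assuming a
 * hypothetical driver context "ctx" and a single scheduler "&fdev->sched":
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &fdev->sched };
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *				    sched_list, ARRAY_SIZE(sched_list), NULL);
 *	if (ret)
 *		return ret;
 *
 * The entity is torn down again with drm_sched_entity_destroy(&ctx->entity).
 */
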
/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @sched: the scheduler to which this rq belongs.
 * @lock: protects @entities, @rb_tree_root and @current_entity.
 * @current_entity: the entity which is to be scheduled.
 * @entities: list of the entities to be scheduled.
 * @rb_tree_root: root of the time-based priority queue of entities for FIFO
 *                scheduling.
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	struct drm_gpu_scheduler	*sched;

	spinlock_t			lock;
	/* Following members are protected by the @lock: */
	struct drm_sched_entity		*current_entity;
	struct list_head		entities;
	struct rb_root_cached		rb_tree_root;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence		scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	/**
	 * @deadline: deadline set on &drm_sched_fence.finished which
	 * potentially needs to be propagated to &drm_sched_fence.parent
	 */
	ktime_t				deadline;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signaled.
	 */
	struct dma_fence		*parent;
	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler	*sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t			lock;
	/**
	 * @owner: job owner for debugging
	 */
	void				*owner;

	/**
	 * @drm_client_id:
	 *
	 * The client_id of the drm_file which owns the job.
	 */
	uint64_t			drm_client_id;
};

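/*
 * For instance, a driver can install the finished fence as the implicit write
 * fence of a buffer before the job has even run (a hedged sketch; "job" is a
 * hypothetical armed &struct drm_sched_job and "resv" a locked reservation
 * object):
 *
 *	dma_resv_add_fence(resv, &job->s_fence->finished,
 *			   DMA_RESV_USAGE_WRITE);
 */
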
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @list: used to add this struct to the scheduler's "pending" list of jobs.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of job.
 * @finish_cb: the callback for the finished fence.
 * @credits: the number of credits this job contributes to the scheduler
 * @work: Helper to reschedule job kill to different context.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(); the driver
 * should then call drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job.
 */
struct drm_sched_job {
	/**
	 * @submit_ts:
	 *
	 * When the job was pushed into the entity queue.
	 */
	ktime_t                         submit_ts;

	/**
	 * @sched:
	 *
	 * The scheduler this job is or will be scheduled on. Gets set by
	 * drm_sched_job_arm(). Valid until drm_sched_backend_ops.free_job()
	 * has finished.
	 */
	struct drm_gpu_scheduler	*sched;

	struct drm_sched_fence		*s_fence;
	struct drm_sched_entity         *entity;

	enum drm_sched_priority		s_priority;
	u32				credits;
	/** @last_dependency: tracks @dependencies as they signal */
	unsigned int			last_dependency;
	atomic_t			karma;

	struct spsc_node		queue_node;
	struct list_head		list;

	/*
	 * work is used only after finish_cb has been used and will not be
	 * accessed anymore.
	 */
	union {
		struct dma_fence_cb	finish_cb;
		struct work_struct	work;
	};

	struct dma_fence_cb		cb;

	/**
	 * @dependencies:
	 *
	 * Contains the dependencies as struct dma_fence for this job, see
	 * drm_sched_job_add_dependency() and
	 * drm_sched_job_add_implicit_dependencies().
	 */
	struct xarray			dependencies;
};

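/*
 * The typical job lifecycle, as a hedged sketch (the foo-prefixed names and
 * variables are hypothetical driver code; "job" embeds a &struct
 * drm_sched_job as "base"):
 *
 *	ret = drm_sched_job_init(&job->base, &ctx->entity, 1, ctx, client_id);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_sched_job_add_implicit_dependencies(&job->base, obj, true);
 *	if (ret) {
 *		drm_sched_job_cleanup(&job->base);
 *		return ret;
 *	}
 *
 *	drm_sched_job_arm(&job->base);
 *	drm_sched_entity_push_job(&job->base);
 *	return 0;
 */
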
/**
 * enum drm_gpu_sched_stat - the scheduler's status
 *
 * @DRM_GPU_SCHED_STAT_NONE: Reserved. Do not use.
 * @DRM_GPU_SCHED_STAT_NOMINAL: Operation succeeded.
 * @DRM_GPU_SCHED_STAT_ENODEV: Error: Device is not available anymore.
 */
enum drm_gpu_sched_stat {
	DRM_GPU_SCHED_STAT_NONE,
	DRM_GPU_SCHED_STAT_NOMINAL,
	DRM_GPU_SCHED_STAT_ENODEV,
};

/**
 * struct drm_sched_backend_ops - Define the backend operations
 *	called by the scheduler
 *
 * These functions should be implemented by the driver.
 */
struct drm_sched_backend_ops {
	/**
	 * @prepare_job:
	 *
	 * Called when the scheduler is considering scheduling this job next, to
	 * get another struct dma_fence for this job to block on.  Once it
	 * returns NULL, run_job() may be called.
	 *
	 * Can be NULL if no additional preparation of the dependencies is
	 * necessary. Skipped when jobs are killed instead of run.
	 */
	struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job,
					 struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved.
	 *
	 * @sched_job: the job to run
	 *
	 * The deprecated drm_sched_resubmit_jobs() (called by &struct
	 * drm_sched_backend_ops.timedout_job) can invoke this again with the
	 * same parameters. Using this is discouraged because it violates
	 * dma_fence rules, notably dma_fence_init() would have to be called a
	 * second time on already initialized fences. Moreover, this is
	 * dangerous because attempts to allocate memory might deadlock with
	 * memory management code waiting for the reset to complete.
	 *
	 * TODO: Document what drivers should do / use instead.
	 *
	 * This method is called in a workqueue context - either from the
	 * submit_wq the driver passed through drm_sched_init(), or, if the
	 * driver passed NULL, a separate, ordered workqueue the scheduler
	 * allocated.
	 *
	 * Note that the scheduler expects to 'inherit' its own reference to
	 * this fence from the callback. It does not invoke an extra
	 * dma_fence_get() on it. Consequently, this callback must take a
	 * reference for the scheduler, and additional ones for the driver's
	 * respective needs.
	 *
	 * Return:
	 * * On success: a dma_fence the driver must signal once the hardware
	 *   has completed the job ("hardware fence").
	 * * On failure: NULL or an ERR_PTR.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 *
	 * @sched_job: The job that has timed out
	 *
	 * Drivers typically issue a reset to recover from GPU hangs.
	 * This procedure looks very different depending on whether a firmware
	 * or a hardware scheduler is being used.
	 *
	 * For a FIRMWARE SCHEDULER, each ring has one scheduler, and each
	 * scheduler has one entity. Hence, the steps taken typically look as
	 * follows:
	 *
	 * 1. Stop the scheduler using drm_sched_stop(). This will pause the
	 *    scheduler workqueues and cancel the timeout work, guaranteeing
	 *    that nothing is queued while the ring is being removed.
	 * 2. Remove the ring. The firmware will make sure that the
	 *    corresponding parts of the hardware are reset, and that other
	 *    rings are not impacted.
	 * 3. Kill the entity and the associated scheduler.
	 *
	 * For a HARDWARE SCHEDULER, a scheduler instance schedules jobs from
	 * one or more entities to one ring. This implies that the entities
	 * associated with the affected scheduler cannot all be torn down,
	 * because this would also affect innocent userspace processes which
	 * did not submit faulty jobs (for example).
	 *
	 * Consequently, the procedure to recover with a hardware scheduler
	 * should look like this:
	 *
	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop().
	 * 2. Kill the entity the faulty job stems from.
	 * 3. Issue a GPU reset on all faulty rings (driver-specific).
	 * 4. Re-submit jobs on all impacted schedulers by pushing them to the
	 *    entities which are still alive.
	 * 5. Restart all schedulers that were stopped in step #1 using
	 *    drm_sched_start().
	 *
	 * A condensed sketch of such a handler is shown after this struct.
	 *
	 * Note that some GPUs have distinct hardware queues but need to reset
	 * the GPU globally, which requires extra synchronization between the
	 * timeout handlers of different schedulers. One way to achieve this
	 * synchronization is to create an ordered workqueue (using
	 * alloc_ordered_workqueue()) at the driver level, and pass this queue
	 * as drm_sched_init()'s @timeout_wq parameter. This will guarantee
	 * that timeout handlers are executed sequentially.
	 *
	 * Return: The scheduler's status, defined by &enum drm_gpu_sched_stat
	 */
	enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};

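/*
 * A driver wires these up in a static ops table, for instance (an
 * illustrative sketch; the foo_* names are hypothetical driver functions):
 *
 *	static const struct drm_sched_backend_ops foo_sched_ops = {
 *		.run_job	= foo_run_job,
 *		.timedout_job	= foo_timedout_job,
 *		.free_job	= foo_free_job,
 *	};
 *
 * A condensed timeout handler for a single scheduler might then look like the
 * following, where foo_reset_hw_ring() stands in for the driver-specific
 * reset, and entity teardown plus job re-submission from the full procedure
 * above are elided:
 *
 *	static enum drm_gpu_sched_stat
 *	foo_timedout_job(struct drm_sched_job *bad)
 *	{
 *		struct drm_gpu_scheduler *sched = bad->sched;
 *
 *		drm_sched_stop(sched, bad);
 *		foo_reset_hw_ring(sched);
 *		drm_sched_start(sched, 0);
 *
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 */
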
/**
 * struct drm_gpu_scheduler - scheduler instance-specific data
 *
 * @ops: backend operations provided by the driver.
 * @credit_limit: the credit limit of this scheduler
 * @credit_count: the current credit count of this scheduler
 * @timeout: the time after which a job is considered to have timed out.
 * @name: name of the ring for which this scheduler is being used.
 * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT,
 *           as there's usually one run-queue per priority, but could be less.
 * @sched_rq: An allocated array of run-queues of size @num_rqs.
 * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @job_id_count: used to assign a unique id to each job.
 * @submit_wq: workqueue used to queue @work_run_job and @work_free_job
 * @timeout_wq: workqueue used to queue @work_tdr
 * @work_run_job: work which calls the run_job op of each scheduler.
 * @work_free_job: work which calls the free_job op of each scheduler.
 * @work_tdr: schedules a delayed call to drm_sched_job_timedout() after the
 *            timeout interval is over.
 * @pending_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the number of hangs caused by a job crosses this limit,
 *              the job is marked guilty and will no longer be considered for
 *              scheduling.
 * @score: score to help the load balancer pick an idle sched
 * @_score: score used when the driver doesn't provide one
 * @ready: marks if the underlying HW is ready to work
 * @free_guilty: A hint to the timeout handler to free the guilty job.
 * @pause_submit: pause queuing of @work_run_job on @submit_wq
 * @own_submit_wq: scheduler owns allocation of @submit_wq
 * @dev: system &struct device
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	u32				credit_limit;
	atomic_t			credit_count;
	long				timeout;
	const char			*name;
	u32                             num_rqs;
	struct drm_sched_rq             **sched_rq;
	wait_queue_head_t		job_scheduled;
	atomic64_t			job_id_count;
	struct workqueue_struct		*submit_wq;
	struct workqueue_struct		*timeout_wq;
	struct work_struct		work_run_job;
	struct work_struct		work_free_job;
	struct delayed_work		work_tdr;
	struct list_head		pending_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t                        *score;
	atomic_t                        _score;
	bool				ready;
	bool				free_guilty;
	bool				pause_submit;
	bool				own_submit_wq;
	struct device			*dev;
};

/**
 * struct drm_sched_init_args - parameters for initializing a DRM GPU scheduler
 *
 * @ops: backend operations provided by the driver
 * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
 *	       allocated and used.
 * @num_rqs: Number of run-queues. This may be at most DRM_SCHED_PRIORITY_COUNT,
 *	     as there's usually one run-queue per priority, but may be less.
 * @credit_limit: the number of credits this scheduler can hold from all jobs
 * @hang_limit: number of times to allow a job to hang before dropping it.
 *		This mechanism is DEPRECATED. Set it to 0.
 * @timeout: timeout value in jiffies for submitted jobs.
 * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
 *		used.
 * @score: score atomic shared with other schedulers. May be NULL.
 * @name: name (typically the driver's name). Used for debugging.
 * @dev: associated device. Used for debugging.
 */
struct drm_sched_init_args {
	const struct drm_sched_backend_ops *ops;
	struct workqueue_struct *submit_wq;
	struct workqueue_struct *timeout_wq;
	u32 num_rqs;
	u32 credit_limit;
	unsigned int hang_limit;
	long timeout;
	atomic_t *score;
	const char *name;
	struct device *dev;
};

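/*
 * Typical initialization, as a hedged sketch ("fdev" and "foo_sched_ops" are
 * hypothetical driver objects; drivers needing globally serialized timeout
 * handling would additionally pass an alloc_ordered_workqueue() via
 * @timeout_wq, as described above):
 *
 *	const struct drm_sched_init_args args = {
 *		.ops = &foo_sched_ops,
 *		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
 *		.credit_limit = 64,
 *		.timeout = msecs_to_jiffies(500),
 *		.name = "foo-ring0",
 *		.dev = fdev->dev,
 *	};
 *	int ret = drm_sched_init(&fdev->sched, &args);
 */
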
/* Scheduler operations */

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_init_args *args);

void drm_sched_fini(struct drm_gpu_scheduler *sched);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);
void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_fault(struct drm_gpu_scheduler *sched);

struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list);

/* Jobs */

int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       u32 credits, void *owner,
		       u64 drm_client_id);
void drm_sched_job_arm(struct drm_sched_job *job);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence);
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
					 struct drm_file *file,
					 u32 handle,
					 u32 point);
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage);
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write);
bool drm_sched_job_has_dependency(struct drm_sched_job *job,
				  struct dma_fence *fence);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_increase_karma(struct drm_sched_job *bad);

static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return s_job && atomic_inc_return(&s_job->karma) > threshold;
}

/* Entities */

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
int drm_sched_entity_error(struct drm_sched_entity *entity);
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list);

#endif