/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

/**
 * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
 *
 * Setting this flag on a scheduler fence prevents pipelining of jobs depending
 * on this fence. In other words we always insert a full CPU round trip before
 * dependent jobs are pushed to the hw queue.
 */
#define DRM_SCHED_FENCE_DONT_PIPELINE	DMA_FENCE_FLAG_USER_BITS
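
/*
 * Example (illustrative): a driver can force the CPU round trip by setting
 * this bit on a scheduler fence before dependent jobs consume it; "fence"
 * below is a hypothetical &struct dma_fence backed by a scheduler fence.
 *
 *	set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags);
 */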

/**
 * DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT - A fence deadline hint has been set
 *
 * Because a deadline hint can be set before the backing hw fence is created,
 * we need to keep track of whether a deadline has already been set.
 */
#define DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT	(DMA_FENCE_FLAG_USER_BITS + 1)

enum dma_resv_usage;
struct dma_resv;
struct drm_gem_object;

struct drm_gpu_scheduler;
struct drm_sched_rq;

struct drm_file;

/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_HIGH,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_LOW,

	DRM_SCHED_PRIORITY_COUNT
};
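
/*
 * Example (illustrative): since the values start at 0 and end at
 * DRM_SCHED_PRIORITY_COUNT, they can directly index per-priority arrays;
 * the entity table below is hypothetical driver state.
 *
 *	struct drm_sched_entity *entities[DRM_SCHED_PRIORITY_COUNT];
 *
 *	static struct drm_sched_entity *
 *	pick_entity(struct drm_sched_entity **entities,
 *		    enum drm_sched_priority prio)
 *	{
 *		return entities[prio];
 *	}
 */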

/* Used to choose between FIFO and RR job-scheduling */
extern int drm_sched_policy;

#define DRM_SCHED_POLICY_RR    0
#define DRM_SCHED_POLICY_FIFO  1

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	/**
	 * @list:
	 *
	 * Used to append this struct to the list of entities in the runqueue
	 * @rq under &drm_sched_rq.entities.
	 *
	 * Protected by &drm_sched_rq.lock of @rq.
	 */
	struct list_head		list;

	/**
	 * @lock:
	 *
	 * Lock protecting the run-queue (@rq) to which this entity belongs,
	 * @priority and the list of schedulers (@sched_list, @num_sched_list).
	 */
	spinlock_t			lock;

	/**
	 * @rq:
	 *
	 * Runqueue on which this entity is currently scheduled.
	 *
	 * FIXME: Locking is very unclear for this. Writers are protected by
	 * @lock, but readers are generally lockless and seem to just race with
	 * not even a READ_ONCE.
	 */
	struct drm_sched_rq		*rq;

	/**
	 * @sched_list:
	 *
	 * A list of schedulers (struct drm_gpu_scheduler). Jobs from this
	 * entity can be scheduled on any scheduler on this list.
	 *
	 * This can be modified by calling drm_sched_entity_modify_sched().
	 * Locking is entirely up to the driver, see the above function for
	 * more details.
	 *
	 * This will be set to NULL if @num_sched_list equals 1 and @rq has
	 * been set already.
	 *
	 * FIXME: This means priority changes through
	 * drm_sched_entity_set_priority() will be lost henceforth in this case.
	 */
	struct drm_gpu_scheduler	**sched_list;

	/**
	 * @num_sched_list:
	 *
	 * Number of drm_gpu_schedulers in the @sched_list.
	 */
	unsigned int			num_sched_list;

	/**
	 * @priority:
	 *
	 * Priority of the entity. This can be modified by calling
	 * drm_sched_entity_set_priority(). Protected by @lock.
	 */
	enum drm_sched_priority		priority;

	/**
	 * @job_queue: the list of jobs of this entity.
	 */
	struct spsc_queue		job_queue;

	/**
	 * @fence_seq:
	 *
	 * A linearly increasing seqno incremented with each new
	 * &drm_sched_fence which is part of the entity.
	 *
	 * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking,
	 * this doesn't need to be atomic.
	 */
	atomic_t			fence_seq;

	/**
	 * @fence_context:
	 *
	 * A unique context for all the fences which belong to this entity. The
	 * &drm_sched_fence.scheduled uses the fence_context but
	 * &drm_sched_fence.finished uses fence_context + 1.
	 */
	uint64_t			fence_context;

	/**
	 * @dependency:
	 *
	 * The dependency fence of the job which is on the top of the job queue.
	 */
	struct dma_fence		*dependency;

	/**
	 * @cb:
	 *
	 * Callback for the dependency fence above.
	 */
	struct dma_fence_cb		cb;

	/**
	 * @guilty:
	 *
	 * Points to the entity's guilty flag, which is set once a hanging job
	 * from this entity has crossed the scheduler's hang limit.
	 */
	atomic_t			*guilty;

	/**
	 * @last_scheduled:
	 *
	 * Points to the finished fence of the last scheduled job. Only written
	 * by the scheduler thread, can be accessed locklessly from
	 * drm_sched_job_arm() if the queue is empty.
	 */
	struct dma_fence __rcu		*last_scheduled;

	/**
	 * @last_user: last group leader pushing a job into the entity.
	 */
	struct task_struct		*last_user;

	/**
	 * @stopped:
	 *
	 * Marks the entity as removed from rq and destined for
	 * termination. This is set by calling drm_sched_entity_flush() and by
	 * drm_sched_fini().
	 */
	bool				stopped;

	/**
	 * @entity_idle:
	 *
	 * Signals when the entity is not in use, used to sequence entity
	 * cleanup in drm_sched_entity_fini().
	 */
	struct completion		entity_idle;

	/**
	 * @oldest_job_waiting:
	 *
	 * Marks the earliest job waiting in the SW queue.
	 */
	ktime_t				oldest_job_waiting;

	/**
	 * @rb_tree_node:
	 *
	 * The node used to insert this entity into the time-based priority
	 * queue.
	 */
	struct rb_node			rb_tree_node;
};
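
/*
 * Example (illustrative): minimal entity setup against a single scheduler.
 * "driver" is a hypothetical driver object whose scheduler was prepared
 * with drm_sched_init(); error handling is elided.
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &driver->sched };
 *	struct drm_sched_entity entity;
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
 *				    sched_list, ARRAY_SIZE(sched_list),
 *				    NULL);
 *	...
 *	drm_sched_entity_destroy(&entity);
 */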

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @sched: the scheduler to which this rq belongs.
 * @lock: protects @entities, @rb_tree_root and @current_entity.
 * @current_entity: the entity which is to be scheduled.
 * @entities: list of the entities to be scheduled.
 * @rb_tree_root: root of the time-based priority queue of entities for FIFO
 *                scheduling
 *
 * A run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	struct drm_gpu_scheduler	*sched;

	spinlock_t			lock;
	/* Following members are protected by the @lock: */
	struct drm_sched_entity		*current_entity;
	struct list_head		entities;
	struct rb_root_cached		rb_tree_root;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence		scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	/**
	 * @deadline: deadline set on &drm_sched_fence.finished which
	 * potentially needs to be propagated to &drm_sched_fence.parent
	 */
	ktime_t				deadline;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence		*parent;

	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler	*sched;

	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t			lock;

	/**
	 * @owner: job owner for debugging
	 */
	void				*owner;
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
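
/*
 * Example (illustrative): because &drm_sched_fence.finished exists as soon
 * as drm_sched_job_init() returns, it can serve as the job's out fence
 * before run_job() has produced a hardware fence; "job" is a hypothetical,
 * already-initialized &struct drm_sched_job.
 *
 *	struct dma_fence *out_fence = dma_fence_get(&job->s_fence->finished);
 */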

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @list: used to add the job to the scheduler's "pending" list.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of job.
 * @finish_cb: the callback for the finished fence.
 * @credits: the number of credits this job contributes to the scheduler
 * @work: Helper to reschedule job kill to different context.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: incremented on every hang caused by this job. If this exceeds the
 *         hang limit of the scheduler then the job is marked guilty and will
 *         not be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and
 * should call drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job. See the example after this struct.
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct list_head		list;

	/**
	 * @sched:
	 *
	 * The scheduler this job is or will be scheduled on. Gets set by
	 * drm_sched_job_arm(). Valid until drm_sched_backend_ops.free_job()
	 * has finished.
	 */
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;

	u32				credits;

	/*
	 * @work is used only after @finish_cb has been used and will not be
	 * accessed anymore.
	 */
	union {
		struct dma_fence_cb		finish_cb;
		struct work_struct		work;
	};

	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity		*entity;
	struct dma_fence_cb		cb;

	/**
	 * @dependencies:
	 *
	 * Contains the dependencies as struct dma_fence for this job, see
	 * drm_sched_job_add_dependency() and
	 * drm_sched_job_add_implicit_dependencies().
	 */
	struct xarray			dependencies;

	/** @last_dependency: tracks @dependencies as they signal */
	unsigned long			last_dependency;

	/**
	 * @submit_ts:
	 *
	 * When the job was pushed into the entity queue.
	 */
	ktime_t				submit_ts;
};
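
/*
 * Example (illustrative): the usual submission flow. "job" embeds a
 * &struct drm_sched_job as its "base" member; "entity", "bo" and "driver"
 * are hypothetical driver state, and error paths are abbreviated.
 *
 *	ret = drm_sched_job_init(&job->base, &entity, 1, driver);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_sched_job_add_implicit_dependencies(&job->base, bo, true);
 *	if (ret)
 *		goto err_cleanup;
 *
 *	drm_sched_job_arm(&job->base);
 *	fence = dma_fence_get(&job->base.s_fence->finished);
 *	drm_sched_entity_push_job(&job->base);
 */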

/**
 * drm_sched_invalidate_job - check if a job crossed the karma threshold
 * @s_job: job to check, may be NULL
 * @threshold: hang limit to compare the job's karma against
 *
 * Increments the job's karma and returns true if it now exceeds @threshold.
 */
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return s_job && atomic_inc_return(&s_job->karma) > threshold;
}

enum drm_gpu_sched_stat {
	DRM_GPU_SCHED_STAT_NONE,	/* Reserve 0 */
	DRM_GPU_SCHED_STAT_NOMINAL,	/* Operation succeeded */
	DRM_GPU_SCHED_STAT_ENODEV,	/* Device is no longer available */
};

/**
 * struct drm_sched_backend_ops - Define the backend operations
 *	called by the scheduler
 *
 * These functions should be implemented by the driver.
 */
struct drm_sched_backend_ops {
	/**
	 * @prepare_job:
	 *
	 * Called when the scheduler is considering scheduling this job next, to
	 * get another struct dma_fence for this job to block on.  Once it
	 * returns NULL, run_job() may be called.
	 *
	 * Can be NULL if no additional preparation of the dependencies is
	 * necessary. Skipped when jobs are killed instead of run.
	 */
	struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job,
					 struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved.  This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 *
	 * This method is called in a workqueue context.
	 *
	 * Drivers typically issue a reset to recover from GPU hangs, and this
	 * procedure usually follows the following workflow (see the sketch
	 * after this member):
	 *
	 * 1. Stop the scheduler using drm_sched_stop(). This will park the
	 *    scheduler thread and cancel the timeout work, guaranteeing that
	 *    nothing is queued while we reset the hardware queue.
	 * 2. Try to gracefully stop non-faulty jobs (optional).
	 * 3. Issue a GPU reset (driver-specific).
	 * 4. Re-submit jobs using drm_sched_resubmit_jobs().
	 * 5. Restart the scheduler using drm_sched_start(). At that point, new
	 *    jobs can be queued, and the scheduler thread is unblocked.
	 *
	 * Note that some GPUs have distinct hardware queues but need to reset
	 * the GPU globally, which requires extra synchronization between the
	 * timeout handlers of the different &drm_gpu_scheduler instances. One
	 * way to achieve this synchronization is to create an ordered
	 * workqueue (using alloc_ordered_workqueue()) at the driver level, and
	 * pass this queue to drm_sched_init(), to guarantee that timeout
	 * handlers are executed sequentially. The above workflow needs to be
	 * slightly adjusted in that case:
	 *
	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop().
	 * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
	 *    the reset (optional).
	 * 3. Issue a GPU reset on all faulty queues (driver-specific).
	 * 4. Re-submit jobs on all schedulers impacted by the reset using
	 *    drm_sched_resubmit_jobs().
	 * 5. Restart all schedulers that were stopped in step #1 using
	 *    drm_sched_start().
	 *
	 * Return DRM_GPU_SCHED_STAT_NOMINAL when all is normal and the
	 * underlying driver has started or completed recovery.
	 *
	 * Return DRM_GPU_SCHED_STAT_ENODEV if the device is no longer
	 * available, i.e. has been unplugged.
	 */
	enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);
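
	/*
	 * Example (illustrative): a timedout_job handler following the
	 * single-queue workflow above; my_driver_reset_hw() stands in for
	 * the driver-specific reset step and is hypothetical.
	 *
	 *	static enum drm_gpu_sched_stat
	 *	my_timedout_job(struct drm_sched_job *sched_job)
	 *	{
	 *		struct drm_gpu_scheduler *sched = sched_job->sched;
	 *
	 *		drm_sched_stop(sched, sched_job);
	 *		my_driver_reset_hw(sched);
	 *		drm_sched_resubmit_jobs(sched);
	 *		drm_sched_start(sched, 0);
	 *
	 *		return DRM_GPU_SCHED_STAT_NOMINAL;
	 *	}
	 */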

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);

	/**
	 * @update_job_credits: Called when the scheduler is considering this
	 * job for execution.
	 *
	 * This callback returns the number of credits the job would take if
	 * pushed to the hardware. Drivers may use this to dynamically update
	 * the job's credit count. For instance, deduct the number of credits
	 * for already signalled native fences.
	 *
	 * This callback is optional.
	 */
	u32 (*update_job_credits)(struct drm_sched_job *sched_job);
};
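
/*
 * Example (illustrative): a minimal ops table. A driver typically provides
 * at least run_job, timedout_job and free_job; the my_*() functions are
 * hypothetical.
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,
 *	};
 */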

/**
 * struct drm_gpu_scheduler - scheduler instance-specific data
 *
 * @ops: backend operations provided by the driver.
 * @credit_limit: the credit limit of this scheduler
 * @credit_count: the current credit count of this scheduler
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT,
 *           as there's usually one run-queue per priority, but could be fewer.
 * @sched_rq: An allocated array of run-queues of size @num_rqs.
 * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @job_id_count: used to assign a unique id to each job.
 * @submit_wq: workqueue used to queue @work_run_job and @work_free_job
 * @timeout_wq: workqueue used to queue @work_tdr
 * @work_run_job: work which calls the run_job op of each scheduler.
 * @work_free_job: work which calls the free_job op of each scheduler.
 * @work_tdr: schedules a delayed call to drm_sched_job_timedout() after the
 *            timeout interval is over.
 * @pending_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the hangs by a job cross this limit then it is marked
 *              guilty and it will no longer be considered for scheduling.
 * @score: score to help the load balancer pick an idle sched
 * @_score: score used when the driver doesn't provide one
 * @ready: marks if the underlying HW is ready to work
 * @free_guilty: A hint to the timeout handler to free the guilty job.
 * @pause_submit: pause queuing of @work_run_job on @submit_wq
 * @own_submit_wq: scheduler owns allocation of @submit_wq
 * @dev: system &struct device
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	u32				credit_limit;
	atomic_t			credit_count;
	long				timeout;
	const char			*name;
	u32				num_rqs;
	struct drm_sched_rq		**sched_rq;
	wait_queue_head_t		job_scheduled;
	atomic64_t			job_id_count;
	struct workqueue_struct		*submit_wq;
	struct workqueue_struct		*timeout_wq;
	struct work_struct		work_run_job;
	struct work_struct		work_free_job;
	struct delayed_work		work_tdr;
	struct list_head		pending_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t			*score;
	atomic_t			_score;
	bool				ready;
	bool				free_guilty;
	bool				pause_submit;
	bool				own_submit_wq;
	struct device			*dev;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   struct workqueue_struct *submit_wq,
		   u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev);
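
/*
 * Example (illustrative): bringing up one scheduler for one hardware ring.
 * "driver" and my_sched_ops are hypothetical; the numeric arguments are
 * num_rqs, credit_limit, hang_limit and timeout, and a NULL submit_wq lets
 * the scheduler allocate its own workqueue.
 *
 *	ret = drm_sched_init(&driver->sched, &my_sched_ops, NULL,
 *			     DRM_SCHED_PRIORITY_COUNT, 64, 0,
 *			     msecs_to_jiffies(500), NULL, NULL,
 *			     "my-ring", driver->dev);
 */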
562 
563 void drm_sched_fini(struct drm_gpu_scheduler *sched);
564 int drm_sched_job_init(struct drm_sched_job *job,
565 		       struct drm_sched_entity *entity,
566 		       u32 credits, void *owner);
567 void drm_sched_job_arm(struct drm_sched_job *job);
568 int drm_sched_job_add_dependency(struct drm_sched_job *job,
569 				 struct dma_fence *fence);
570 int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
571 					 struct drm_file *file,
572 					 u32 handle,
573 					 u32 point);
574 int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
575 					struct dma_resv *resv,
576 					enum dma_resv_usage usage);
577 int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
578 					    struct drm_gem_object *obj,
579 					    bool write);
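
/*
 * Example (illustrative): pulling dependencies from a reservation object
 * into the job's dependency array before arming it; "obj" is a
 * hypothetical &struct drm_gem_object.
 *
 *	ret = drm_sched_job_add_resv_dependencies(&job->base, obj->resv,
 *						  DMA_RESV_USAGE_WRITE);
 */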

void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list);

void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
				     struct drm_sched_rq *rq, ktime_t ts);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
int drm_sched_entity_error(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_alloc(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_init(struct drm_sched_fence *fence,
			  struct drm_sched_entity *entity);
void drm_sched_fence_free(struct drm_sched_fence *fence);

void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
			       struct dma_fence *parent);
void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list);

#endif