xref: /linux/include/drm/gpu_scheduler.h (revision b6a1af0362b3232c7b474b9b46e49b862602018c)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #ifndef _DRM_GPU_SCHEDULER_H_
25 #define _DRM_GPU_SCHEDULER_H_
26 
27 #include <drm/spsc_queue.h>
28 #include <linux/dma-fence.h>
29 #include <linux/completion.h>
30 #include <linux/xarray.h>
31 #include <linux/workqueue.h>
32 
33 #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
34 
35 /**
36  * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
37  *
38  * Setting this flag on a scheduler fence prevents pipelining of jobs depending
39  * on this fence. In other words, we always insert a full CPU round trip before
40  * dependent jobs are pushed to the hw queue.
41  */
42 #define DRM_SCHED_FENCE_DONT_PIPELINE	DMA_FENCE_FLAG_USER_BITS
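/*
 * Illustrative sketch (not part of the original header): a driver that wants
 * to force the CPU round trip can set this flag on the finished fence of an
 * armed job; the "job" pointer here is an assumption for the example.
 *
 *	set_bit(DRM_SCHED_FENCE_DONT_PIPELINE,
 *		&job->s_fence->finished.flags);
 */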
43 
44 enum dma_resv_usage;
45 struct dma_resv;
46 struct drm_gem_object;
47 
48 struct drm_gpu_scheduler;
49 struct drm_sched_rq;
50 
51 /* These are often used as an (initial) index
52  * to an array, and as such should start at 0.
53  */
54 enum drm_sched_priority {
55 	DRM_SCHED_PRIORITY_MIN,
56 	DRM_SCHED_PRIORITY_NORMAL,
57 	DRM_SCHED_PRIORITY_HIGH,
58 	DRM_SCHED_PRIORITY_KERNEL,
59 
60 	DRM_SCHED_PRIORITY_COUNT,
61 	DRM_SCHED_PRIORITY_UNSET = -2
62 };
63 
64 /* Used to choose between FIFO and RR job scheduling */
65 extern int drm_sched_policy;
66 
67 #define DRM_SCHED_POLICY_RR    0
68 #define DRM_SCHED_POLICY_FIFO  1
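/*
 * Illustrative sketch: code that needs to branch on the active policy can
 * test the module parameter directly, e.g.
 *
 *	bool fifo = drm_sched_policy == DRM_SCHED_POLICY_FIFO;
 */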
69 
70 /**
71  * struct drm_sched_entity - A wrapper around a job queue (typically
72  * attached to the DRM file_priv).
73  *
74  * Entities will emit jobs, in order, to their corresponding hardware
75  * ring, and the scheduler will alternate between entities based on
76  * scheduling policy.
77  */
78 struct drm_sched_entity {
79 	/**
80 	 * @list:
81 	 *
82 	 * Used to append this struct to the list of entities in the runqueue
83 	 * @rq under &drm_sched_rq.entities.
84 	 *
85 	 * Protected by &drm_sched_rq.lock of @rq.
86 	 */
87 	struct list_head		list;
88 
89 	/**
90 	 * @rq:
91 	 *
92 	 * Runqueue on which this entity is currently scheduled.
93 	 *
94 	 * FIXME: Locking is very unclear for this. Writers are protected by
95 	 * @rq_lock, but readers are generally lockless and seem to just race
96 	 * with not even a READ_ONCE.
97 	 */
98 	struct drm_sched_rq		*rq;
99 
100 	/**
101 	 * @sched_list:
102 	 *
103 	 * A list of schedulers (struct drm_gpu_scheduler).  Jobs from this entity can
104 	 * be scheduled on any scheduler on this list.
105 	 *
106 	 * This can be modified by calling drm_sched_entity_modify_sched().
107 	 * Locking is entirely up to the driver, see the above function for more
108 	 * details.
109 	 *
110 	 * This will be set to NULL if &num_sched_list equals 1 and @rq has been
111 	 * set already.
112 	 *
113 	 * FIXME: This means priority changes through
114 	 * drm_sched_entity_set_priority() will be lost henceforth in this case.
115 	 */
116 	struct drm_gpu_scheduler        **sched_list;
117 
118 	/**
119 	 * @num_sched_list:
120 	 *
121 	 * Number of drm_gpu_schedulers in the @sched_list.
122 	 */
123 	unsigned int                    num_sched_list;
124 
125 	/**
126 	 * @priority:
127 	 *
128 	 * Priority of the entity. This can be modified by calling
129 	 * drm_sched_entity_set_priority(). Protected by &rq_lock.
130 	 */
131 	enum drm_sched_priority         priority;
132 
133 	/**
134 	 * @rq_lock:
135 	 *
136 	 * Lock to modify the runqueue to which this entity belongs.
137 	 */
138 	spinlock_t			rq_lock;
139 
140 	/**
141 	 * @job_queue: the list of jobs of this entity.
142 	 */
143 	struct spsc_queue		job_queue;
144 
145 	/**
146 	 * @fence_seq:
147 	 *
148 	 * A linearly increasing seqno incremented with each new
149 	 * &drm_sched_fence which is part of the entity.
150 	 *
151 	 * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking,
152 	 * this doesn't need to be atomic.
153 	 */
154 	atomic_t			fence_seq;
155 
156 	/**
157 	 * @fence_context:
158 	 *
159 	 * A unique context for all the fences which belong to this entity.  The
160 	 * &drm_sched_fence.scheduled uses the fence_context but
161 	 * &drm_sched_fence.finished uses fence_context + 1.
162 	 */
163 	uint64_t			fence_context;
164 
165 	/**
166 	 * @dependency:
167 	 *
168 	 * The dependency fence of the job which is on the top of the job queue.
169 	 */
170 	struct dma_fence		*dependency;
171 
172 	/**
173 	 * @cb:
174 	 *
175 	 * Callback for the dependency fence above.
176 	 */
177 	struct dma_fence_cb		cb;
178 
179 	/**
180 	 * @guilty:
181 	 *
182 	 * Points to the entity's guilty flag.
183 	 */
184 	atomic_t			*guilty;
185 
186 	/**
187 	 * @last_scheduled:
188 	 *
189 	 * Points to the finished fence of the last scheduled job. Only written
190 	 * by the scheduler thread, can be accessed locklessly from
191 	 * drm_sched_job_arm() iff the queue is empty.
192 	 */
193 	struct dma_fence                *last_scheduled;
194 
195 	/**
196 	 * @last_user: last group leader pushing a job into the entity.
197 	 */
198 	struct task_struct		*last_user;
199 
200 	/**
201 	 * @stopped:
202 	 *
203 	 * Marks the entity as removed from rq and destined for
204 	 * termination. This is set by calling drm_sched_entity_flush() and by
205 	 * drm_sched_fini().
206 	 */
207 	bool 				stopped;
208 
209 	/**
210 	 * @entity_idle:
211 	 *
212 	 * Signals when entity is not in use, used to sequence entity cleanup in
213 	 * drm_sched_entity_fini().
214 	 */
215 	struct completion		entity_idle;
216 
217 	/**
218 	 * @oldest_job_waiting:
219 	 *
220 	 * Marks the earliest job waiting in the SW queue.
221 	 */
222 	ktime_t				oldest_job_waiting;
223 
224 	/**
225 	 * @rb_tree_node:
226 	 *
227 	 * The node used to insert this entity into the time-based priority queue
228 	 */
229 	struct rb_node			rb_tree_node;
230 
231 };
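/*
 * Illustrative sketch (my_ctx and my_sched are assumptions for the example):
 * a driver typically embeds a &struct drm_sched_entity in its per-file
 * context and initializes it against one or more schedulers:
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &my_sched };
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&my_ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *				    sched_list, ARRAY_SIZE(sched_list), NULL);
 */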
232 
233 /**
234  * struct drm_sched_rq - queue of entities to be scheduled.
235  *
236  * @lock: to modify the entities list.
237  * @sched: the scheduler to which this rq belongs to.
238  * @entities: list of the entities to be scheduled.
239  * @current_entity: the entity which is to be scheduled.
240  * @rb_tree_root: root of the time-based priority queue of entities for FIFO scheduling
241  *
242  * Run queue is a set of entities scheduling command submissions for
243  * one specific ring. It implements the scheduling policy that selects
244  * the next entity to emit commands from.
245  */
246 struct drm_sched_rq {
247 	spinlock_t			lock;
248 	struct drm_gpu_scheduler	*sched;
249 	struct list_head		entities;
250 	struct drm_sched_entity		*current_entity;
251 	struct rb_root_cached		rb_tree_root;
252 };
253 
254 /**
255  * struct drm_sched_fence - fences corresponding to the scheduling of a job.
256  */
257 struct drm_sched_fence {
258         /**
259          * @scheduled: this fence is what will be signaled by the scheduler
260          * when the job is scheduled.
261          */
262 	struct dma_fence		scheduled;
263 
264         /**
265          * @finished: this fence is what will be signaled by the scheduler
266          * when the job is completed.
267          *
268          * When setting up an out fence for the job, you should use
269          * this, since it's available immediately upon
270          * drm_sched_job_init(), and the fence returned by the driver
271          * from run_job() won't be created until the dependencies have
272          * resolved.
273          */
274 	struct dma_fence		finished;
275 
276         /**
277          * @parent: the fence returned by &drm_sched_backend_ops.run_job
278          * when scheduling the job on hardware. We signal the
279          * &drm_sched_fence.finished fence once parent is signalled.
280          */
281 	struct dma_fence		*parent;
282         /**
283          * @sched: the scheduler instance to which the job having this struct
284          * belongs to.
285          */
286 	struct drm_gpu_scheduler	*sched;
287         /**
288          * @lock: the lock used by the scheduled and the finished fences.
289          */
290 	spinlock_t			lock;
291         /**
292          * @owner: job owner for debugging
293          */
294 	void				*owner;
295 };
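/*
 * Illustrative sketch: because &drm_sched_fence.finished exists as soon as
 * drm_sched_job_init() has run, it is the fence to hand out as the job's
 * out-fence (the "job" pointer is an assumption for the example):
 *
 *	struct dma_fence *out_fence =
 *		dma_fence_get(&job->s_fence->finished);
 */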
296 
297 struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
298 
299 /**
300  * struct drm_sched_job - A job to be run by an entity.
301  *
302  * @queue_node: used to append this struct to the queue of jobs in an entity.
303  * @list: a job participates in the "pending" and "done" lists.
304  * @sched: the scheduler instance on which this job is scheduled.
305  * @s_fence: contains the fences for the scheduling of job.
306  * @finish_cb: the callback for the finished fence.
307  * @work: Helper to reschedule the job kill to a different context.
308  * @id: a unique id assigned to each job scheduled on the scheduler.
309  * @karma: increment on every hang caused by this job. If this exceeds the hang
310  *         limit of the scheduler then the job is marked guilty and will not
311  *         be scheduled further.
312  * @s_priority: the priority of the job.
313  * @entity: the entity to which this job belongs.
314  * @cb: the callback for the parent fence in s_fence.
315  *
316  * A job is created by the driver using drm_sched_job_init(), and
317  * should call drm_sched_entity_push_job() once it wants the scheduler
318  * to schedule the job.
319  */
320 struct drm_sched_job {
321 	struct spsc_node		queue_node;
322 	struct list_head		list;
323 	struct drm_gpu_scheduler	*sched;
324 	struct drm_sched_fence		*s_fence;
325 
326 	/*
327 	 * work is used only after finish_cb has been used and will not be
328 	 * accessed anymore.
329 	 */
330 	union {
331 		struct dma_fence_cb		finish_cb;
332 		struct work_struct		work;
333 	};
334 
335 	uint64_t			id;
336 	atomic_t			karma;
337 	enum drm_sched_priority		s_priority;
338 	struct drm_sched_entity         *entity;
339 	struct dma_fence_cb		cb;
340 	/**
341 	 * @dependencies:
342 	 *
343 	 * Contains the dependencies as struct dma_fence for this job, see
344 	 * drm_sched_job_add_dependency() and
345 	 * drm_sched_job_add_implicit_dependencies().
346 	 */
347 	struct xarray			dependencies;
348 
349 	/** @last_dependency: tracks @dependencies as they signal */
350 	unsigned long			last_dependency;
351 
352 	/**
353 	 * @submit_ts:
354 	 *
355 	 * When the job was pushed into the entity queue.
356 	 */
357 	ktime_t                         submit_ts;
358 };
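/*
 * Minimal submission sketch (illustrative only; my_job, my_entity, bo and
 * owner are assumptions for the example). A driver allocates its own job
 * struct embedding a &struct drm_sched_job, initializes it, adds
 * dependencies, arms it and finally pushes it to the entity:
 *
 *	ret = drm_sched_job_init(&my_job->base, &my_entity->base, owner);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_sched_job_add_implicit_dependencies(&my_job->base, bo, true);
 *	if (ret)
 *		goto err_cleanup;
 *
 *	drm_sched_job_arm(&my_job->base);
 *	fence = dma_fence_get(&my_job->base.s_fence->finished);
 *	drm_sched_entity_push_job(&my_job->base);
 */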
359 
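/**
 * drm_sched_invalidate_job - increment a job's karma and test it against a
 * threshold
 * @s_job: the job to check, may be NULL
 * @threshold: the hang limit to compare against
 *
 * Returns true if @s_job is non-NULL and its karma counter, after being
 * incremented, exceeds @threshold.
 */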
360 static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
361 					    int threshold)
362 {
363 	return s_job && atomic_inc_return(&s_job->karma) > threshold;
364 }
365 
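/**
 * enum drm_gpu_sched_stat - status returned by &drm_sched_backend_ops.timedout_job
 * @DRM_GPU_SCHED_STAT_NONE: reserved, do not use
 * @DRM_GPU_SCHED_STAT_NOMINAL: all is normal, recovery has started or completed
 * @DRM_GPU_SCHED_STAT_ENODEV: the device is no longer available
 */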
366 enum drm_gpu_sched_stat {
367 	DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
368 	DRM_GPU_SCHED_STAT_NOMINAL,
369 	DRM_GPU_SCHED_STAT_ENODEV,
370 };
371 
372 /**
373  * struct drm_sched_backend_ops - Define the backend operations
374  *	called by the scheduler
375  *
376  * These functions should be implemented in the driver side.
377  */
378 struct drm_sched_backend_ops {
379 	/**
380 	 * @prepare_job:
381 	 *
382 	 * Called when the scheduler is considering scheduling this job next, to
383 	 * get another struct dma_fence for this job to block on.  Once it
384 	 * returns NULL, run_job() may be called.
385 	 *
386 	 * Can be NULL if no additional preparation of the dependencies is
387 	 * necessary. Skipped when jobs are killed instead of run.
388 	 */
389 	struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job,
390 					 struct drm_sched_entity *s_entity);
391 
392 	/**
393          * @run_job: Called to execute the job once all of the dependencies
394          * have been resolved.  This may be called multiple times, if
395 	 * timedout_job() has happened and drm_sched_job_recovery()
396 	 * decides to try it again.
397 	 */
398 	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
399 
400 	/**
401 	 * @timedout_job: Called when a job has taken too long to execute,
402 	 * to trigger GPU recovery.
403 	 *
404 	 * This method is called in a workqueue context.
405 	 *
406 	 * Drivers typically issue a reset to recover from GPU hangs, and this
407 	 * procedure usually follows the following workflow:
408 	 *
409 	 * 1. Stop the scheduler using drm_sched_stop(). This will park the
410 	 *    scheduler thread and cancel the timeout work, guaranteeing that
411 	 *    nothing is queued while we reset the hardware queue
412 	 * 2. Try to gracefully stop non-faulty jobs (optional)
413 	 * 3. Issue a GPU reset (driver-specific)
414 	 * 4. Re-submit jobs using drm_sched_resubmit_jobs()
415 	 * 5. Restart the scheduler using drm_sched_start(). At that point, new
416 	 *    jobs can be queued, and the scheduler thread is unblocked
417 	 *
418 	 * Note that some GPUs have distinct hardware queues but need to reset
419 	 * the GPU globally, which requires extra synchronization between the
420 	 * timeout handler of the different &drm_gpu_scheduler. One way to
421 	 * achieve this synchronization is to create an ordered workqueue
422 	 * (using alloc_ordered_workqueue()) at the driver level, and pass this
423 	 * queue to drm_sched_init(), to guarantee that timeout handlers are
424 	 * executed sequentially. The above workflow needs to be slightly
425 	 * adjusted in that case:
426 	 *
427 	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop()
428 	 * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
429 	 *    the reset (optional)
430 	 * 3. Issue a GPU reset on all faulty queues (driver-specific)
431 	 * 4. Re-submit jobs on all schedulers impacted by the reset using
432 	 *    drm_sched_resubmit_jobs()
433 	 * 5. Restart all schedulers that were stopped in step #1 using
434 	 *    drm_sched_start()
435 	 *
436 	 * Return DRM_GPU_SCHED_STAT_NOMINAL when all is normal
437 	 * and the underlying driver has started or completed recovery.
438 	 *
439 	 * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
440 	 * available, i.e. has been unplugged.
441 	 */
442 	enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);
443 
444 	/**
445          * @free_job: Called once the job's finished fence has been signaled
446          * and it's time to clean it up.
447 	 */
448 	void (*free_job)(struct drm_sched_job *sched_job);
449 };
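/*
 * Illustrative sketch (not from the original header; the my_*() helpers are
 * assumptions for the example): a driver-side ops table, with a timeout
 * handler following the single-scheduler recovery workflow documented in
 * &drm_sched_backend_ops.timedout_job above:
 *
 *	static enum drm_gpu_sched_stat
 *	my_timedout_job(struct drm_sched_job *bad)
 *	{
 *		struct drm_gpu_scheduler *sched = bad->sched;
 *
 *		drm_sched_stop(sched, bad);		// 1. park scheduler, cancel tdr
 *		my_hw_reset(sched);			// 3. driver-specific GPU reset
 *		drm_sched_resubmit_jobs(sched);		// 4. re-submit pending jobs
 *		drm_sched_start(sched, true);		// 5. unblock the scheduler
 *
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,
 *	};
 */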
450 
451 /**
452  * struct drm_gpu_scheduler - scheduler instance-specific data
453  *
454  * @ops: backend operations provided by the driver.
455  * @hw_submission_limit: the max size of the hardware queue.
456  * @timeout: the time after which a job is removed from the scheduler.
457  * @name: name of the ring for which this scheduler is being used.
458  * @sched_rq: priority wise array of run queues.
459  * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
460  *                  is ready to be scheduled.
461  * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
462  *                 waits on this wait queue until all the scheduled jobs are
463  *                 finished.
464  * @hw_rq_count: the number of jobs currently in the hardware queue.
465  * @job_id_count: used to assign a unique id to each job.
466  * @timeout_wq: workqueue used to queue @work_tdr
467  * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
468  *            timeout interval is over.
469  * @thread: the kthread on which the scheduler runs.
470  * @pending_list: the list of jobs which are currently in the job queue.
471  * @job_list_lock: lock to protect the pending_list.
472  * @hang_limit: once the hangs by a job crosses this limit then it is marked
473  *              guilty and it will no longer be considered for scheduling.
474  * @score: score to help the load balancer pick an idle sched
475  * @_score: score used when the driver doesn't provide one
476  * @ready: marks if the underlying HW is ready to work
477  * @free_guilty: A hint to the timeout handler to free the guilty job.
478  * @dev: system &struct device
479  *
480  * One scheduler is implemented for each hardware ring.
481  */
482 struct drm_gpu_scheduler {
483 	const struct drm_sched_backend_ops	*ops;
484 	uint32_t			hw_submission_limit;
485 	long				timeout;
486 	const char			*name;
487 	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_COUNT];
488 	wait_queue_head_t		wake_up_worker;
489 	wait_queue_head_t		job_scheduled;
490 	atomic_t			hw_rq_count;
491 	atomic64_t			job_id_count;
492 	struct workqueue_struct		*timeout_wq;
493 	struct delayed_work		work_tdr;
494 	struct task_struct		*thread;
495 	struct list_head		pending_list;
496 	spinlock_t			job_list_lock;
497 	int				hang_limit;
498 	atomic_t                        *score;
499 	atomic_t                        _score;
500 	bool				ready;
501 	bool				free_guilty;
502 	struct device			*dev;
503 };
504 
505 int drm_sched_init(struct drm_gpu_scheduler *sched,
506 		   const struct drm_sched_backend_ops *ops,
507 		   uint32_t hw_submission, unsigned hang_limit,
508 		   long timeout, struct workqueue_struct *timeout_wq,
509 		   atomic_t *score, const char *name, struct device *dev);
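/*
 * Illustrative sketch (ring, my_sched_ops and dev are assumptions for the
 * example): one scheduler instance is typically created per hardware ring:
 *
 *	ret = drm_sched_init(&ring->sched, &my_sched_ops,
 *			     64,				// hw_submission limit
 *			     0,					// hang_limit
 *			     msecs_to_jiffies(10 * 1000),	// timeout, in jiffies
 *			     NULL,				// timeout_wq: use system_wq
 *			     NULL,				// score: use the internal one
 *			     "my-ring", dev);
 */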
510 
511 void drm_sched_fini(struct drm_gpu_scheduler *sched);
512 int drm_sched_job_init(struct drm_sched_job *job,
513 		       struct drm_sched_entity *entity,
514 		       void *owner);
515 void drm_sched_job_arm(struct drm_sched_job *job);
516 int drm_sched_job_add_dependency(struct drm_sched_job *job,
517 				 struct dma_fence *fence);
518 int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
519 					struct dma_resv *resv,
520 					enum dma_resv_usage usage);
521 int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
522 					    struct drm_gem_object *obj,
523 					    bool write);
524 
525 
526 void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
527 				    struct drm_gpu_scheduler **sched_list,
528                                    unsigned int num_sched_list);
529 
530 void drm_sched_job_cleanup(struct drm_sched_job *job);
531 void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
532 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
533 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
534 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
535 void drm_sched_increase_karma(struct drm_sched_job *bad);
536 void drm_sched_reset_karma(struct drm_sched_job *bad);
537 void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
538 bool drm_sched_dependency_optimized(struct dma_fence* fence,
539 				    struct drm_sched_entity *entity);
540 void drm_sched_fault(struct drm_gpu_scheduler *sched);
541 void drm_sched_job_kickout(struct drm_sched_job *s_job);
542 
543 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
544 			     struct drm_sched_entity *entity);
545 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
546 				struct drm_sched_entity *entity);
547 
548 void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts);
549 
550 int drm_sched_entity_init(struct drm_sched_entity *entity,
551 			  enum drm_sched_priority priority,
552 			  struct drm_gpu_scheduler **sched_list,
553 			  unsigned int num_sched_list,
554 			  atomic_t *guilty);
555 long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
556 void drm_sched_entity_fini(struct drm_sched_entity *entity);
557 void drm_sched_entity_destroy(struct drm_sched_entity *entity);
558 void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
559 struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
560 void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
561 void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
562 				   enum drm_sched_priority priority);
563 bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
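/*
 * Illustrative teardown sketch: drm_sched_entity_destroy() is the usual
 * one-stop helper, combining drm_sched_entity_flush() and
 * drm_sched_entity_fini():
 *
 *	drm_sched_entity_destroy(&my_ctx->entity);
 */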
564 
565 struct drm_sched_fence *drm_sched_fence_alloc(
566 	struct drm_sched_entity *s_entity, void *owner);
567 void drm_sched_fence_init(struct drm_sched_fence *fence,
568 			  struct drm_sched_entity *entity);
569 void drm_sched_fence_free(struct drm_sched_fence *fence);
570 
571 void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
572 void drm_sched_fence_finished(struct drm_sched_fence *fence);
573 
574 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
575 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
576 		                unsigned long remaining);
577 struct drm_gpu_scheduler *
578 drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
579 		     unsigned int num_sched_list);
580 
581 #endif
582