/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _XE_GPU_SCHEDULER_H_
#define _XE_GPU_SCHEDULER_H_

#include "xe_gpu_scheduler_types.h"
#include "xe_sched_job_types.h"

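/*
 * Create and destroy an Xe GPU scheduler. The Xe scheduler wraps a DRM GPU
 * scheduler (sched->base) and adds Xe-specific backend ops plus a message
 * queue (see xe_sched_add_msg() below).
 */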
int xe_sched_init(struct xe_gpu_scheduler *sched,
		  const struct drm_sched_backend_ops *ops,
		  const struct xe_sched_backend_ops *xe_ops,
		  struct workqueue_struct *submit_wq,
		  uint32_t hw_submission, unsigned hang_limit,
		  long timeout, struct workqueue_struct *timeout_wq,
		  atomic_t *score, const char *name,
		  struct device *dev);
void xe_sched_fini(struct xe_gpu_scheduler *sched);

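/* Start and stop the scheduler's submission processing. */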
void xe_sched_submission_start(struct xe_gpu_scheduler *sched);
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched);
void xe_sched_submission_stop_async(struct xe_gpu_scheduler *sched);

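/* Re-arm the scheduler's job timeout (TDR) handling. */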
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched);

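/*
 * Queue a message for the scheduler to process. The _locked variant is for
 * callers that already hold the message lock (see the helpers below), e.g.
 * (illustrative only):
 *
 *	xe_sched_msg_lock(sched);
 *	xe_sched_add_msg_locked(sched, msg);
 *	xe_sched_msg_unlock(sched);
 */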
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
		      struct xe_sched_msg *msg);
void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
			     struct xe_sched_msg *msg);

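/* The message list is protected by the base scheduler's job_list_lock. */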
static inline void xe_sched_msg_lock(struct xe_gpu_scheduler *sched)
{
	spin_lock(&sched->base.job_list_lock);
}

static inline void xe_sched_msg_unlock(struct xe_gpu_scheduler *sched)
{
	spin_unlock(&sched->base.job_list_lock);
}

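/* Stop the base DRM scheduler without singling out a bad job. */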
static inline void xe_sched_stop(struct xe_gpu_scheduler *sched)
{
	drm_sched_stop(&sched->base, NULL);
}

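/* Queue the scheduler's timeout (TDR) handler to run immediately. */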
static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
{
	drm_sched_tdr_queue_imm(&sched->base);
}

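/* Re-run every pending job whose hardware fence has not signaled yet. */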
static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;

	list_for_each_entry(s_job, &sched->base.pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *hw_fence = s_fence->parent;

		if (hw_fence && !dma_fence_is_signaled(hw_fence))
			sched->base.ops->run_job(s_job);
	}
}

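/* Bump the job's karma and report whether it has now exceeded @threshold. */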
static inline bool
xe_sched_invalidate_job(struct xe_sched_job *job, int threshold)
{
	return drm_sched_invalidate_job(&job->drm, threshold);
}

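/* Add a job at the head of the base scheduler's pending list. */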
static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched,
					    struct xe_sched_job *job)
{
	spin_lock(&sched->base.job_list_lock);
	list_add(&job->drm.list, &sched->base.pending_list);
	spin_unlock(&sched->base.job_list_lock);
}

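/* Peek at the first pending job, or NULL if the pending list is empty. */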
static inline
struct xe_sched_job *xe_sched_first_pending_job(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_job *job;

	spin_lock(&sched->base.job_list_lock);
	job = list_first_entry_or_null(&sched->base.pending_list,
				       struct xe_sched_job, drm.list);
	spin_unlock(&sched->base.job_list_lock);

	return job;
}

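/* Initialize an entity whose scheduler list is just this one Xe scheduler. */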
static inline int
xe_sched_entity_init(struct xe_sched_entity *entity,
		     struct xe_gpu_scheduler *sched)
{
	return drm_sched_entity_init(entity, 0,
				     (struct drm_gpu_scheduler **)&sched,
				     1, NULL);
}

#define xe_sched_entity_fini drm_sched_entity_fini

#endif