xref: /linux/drivers/gpu/drm/xe/xe_gpu_scheduler.h (revision face6a3615a649456eb4549f6d474221d877d604)
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _XE_GPU_SCHEDULER_H_
#define _XE_GPU_SCHEDULER_H_

#include "xe_gpu_scheduler_types.h"
#include "xe_sched_job.h"

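/*
 * struct xe_gpu_scheduler wraps a struct drm_gpu_scheduler (sched->base) and
 * adds a driver-side message queue on top of it; most helpers below forward
 * directly to the DRM scheduler core.
 */
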
int xe_sched_init(struct xe_gpu_scheduler *sched,
		  const struct drm_sched_backend_ops *ops,
		  const struct xe_sched_backend_ops *xe_ops,
		  struct workqueue_struct *submit_wq,
		  uint32_t hw_submission, unsigned hang_limit,
		  long timeout, struct workqueue_struct *timeout_wq,
		  atomic_t *score, const char *name,
		  struct device *dev);
void xe_sched_fini(struct xe_gpu_scheduler *sched);

void xe_sched_submission_start(struct xe_gpu_scheduler *sched);
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched);

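/* Resume the timeout handler (TDR: timeout detection and recovery) */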
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched);

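/*
 * xe_sched_add_msg() takes the message lock itself; the _locked variant
 * expects the caller to already hold it via xe_sched_msg_lock(). The _head
 * variant queues the message at the head of the list rather than the tail.
 */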
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
		      struct xe_sched_msg *msg);
void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
			     struct xe_sched_msg *msg);
void xe_sched_add_msg_head(struct xe_gpu_scheduler *sched,
			   struct xe_sched_msg *msg);

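/* The message queue is protected by the base scheduler's job_list_lock */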
static inline void xe_sched_msg_lock(struct xe_gpu_scheduler *sched)
{
	spin_lock(&sched->base.job_list_lock);
}

static inline void xe_sched_msg_unlock(struct xe_gpu_scheduler *sched)
{
	spin_unlock(&sched->base.job_list_lock);
}

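/* Stop the underlying DRM scheduler without singling out a guilty job */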
static inline void xe_sched_stop(struct xe_gpu_scheduler *sched)
{
	drm_sched_stop(&sched->base, NULL);
}

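/* Queue the base scheduler's timeout (TDR) work for immediate execution */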
static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
{
	drm_sched_tdr_queue_imm(&sched->base);
}

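/*
 * Re-run every pending job that still needs to be emitted (skip_emit) or
 * whose hardware fence has not signaled. The pending list is walked without
 * job_list_lock here, so this presumably runs only while submission is
 * stopped, e.g. during reset handling.
 */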
static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;

	list_for_each_entry(s_job, &sched->base.pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;
		struct dma_fence *hw_fence = s_fence->parent;

		if (to_xe_sched_job(s_job)->skip_emit ||
		    (hw_fence && !dma_fence_is_signaled(hw_fence)))
			sched->base.ops->run_job(s_job);
	}
}

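/*
 * Bumps the job's karma via drm_sched_invalidate_job(); returns true once
 * its karma exceeds @threshold.
 */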
static inline bool
xe_sched_invalidate_job(struct xe_sched_job *job, int threshold)
{
	return drm_sched_invalidate_job(&job->drm, threshold);
}

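/* Put a job back on the head of the pending list, under job_list_lock */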
static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched,
					    struct xe_sched_job *job)
{
	spin_lock(&sched->base.job_list_lock);
	list_add(&job->drm.list, &sched->base.pending_list);
	spin_unlock(&sched->base.job_list_lock);
}

/**
 * xe_sched_first_pending_job() - Find the first unsignaled pending job
 * @sched: Xe GPU scheduler
 *
 * Return: The first job in the pending list whose hardware fence has not
 * signaled, or NULL if there is none.
 */
static inline
struct xe_sched_job *xe_sched_first_pending_job(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_job *job, *r_job = NULL;

	spin_lock(&sched->base.job_list_lock);
	list_for_each_entry(job, &sched->base.pending_list, drm.list) {
		struct drm_sched_fence *s_fence = job->drm.s_fence;
		struct dma_fence *hw_fence = s_fence->parent;

		if (hw_fence && !dma_fence_is_signaled(hw_fence)) {
			r_job = job;
			break;
		}
	}
	spin_unlock(&sched->base.job_list_lock);

	return r_job;
}

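/*
 * Initialize @entity with a single run queue pointing at @sched (priority 0,
 * no guilty pointer)
 */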
static inline int
xe_sched_entity_init(struct xe_sched_entity *entity,
		     struct xe_gpu_scheduler *sched)
{
	return drm_sched_entity_init(entity, 0,
				     (struct drm_gpu_scheduler **)&sched,
				     1, NULL);
}

#define xe_sched_entity_fini drm_sched_entity_fini

#endif