// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gpu_scheduler.h"

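/*
 * Kick the message-processing worker, but only if submission is not
 * paused; xe_sched_submission_start() requeues the work on resume.
 */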
static void xe_sched_process_msg_queue(struct xe_gpu_scheduler *sched)
{
	if (!READ_ONCE(sched->base.pause_submit))
		queue_work(sched->base.submit_wq, &sched->work_process_msg);
}

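/*
 * Requeue the worker if any message is still pending, so messages are
 * handled one per work item with the pause state rechecked in between.
 */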
static void xe_sched_process_msg_queue_if_ready(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_msg *msg;

	xe_sched_msg_lock(sched);
	msg = list_first_entry_or_null(&sched->msgs, struct xe_sched_msg, link);
	if (msg)
		xe_sched_process_msg_queue(sched);
	xe_sched_msg_unlock(sched);
}

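/*
 * Pop the first message off the queue under the message lock, or
 * return NULL if the queue is empty.
 */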
static struct xe_sched_msg *
xe_sched_get_msg(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_msg *msg;

	xe_sched_msg_lock(sched);
	msg = list_first_entry_or_null(&sched->msgs,
				       struct xe_sched_msg, link);
	if (msg)
		list_del_init(&msg->link);
	xe_sched_msg_unlock(sched);

	return msg;
}

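/*
 * Worker function: bail out if submission was paused, otherwise
 * process a single message through the backend's process_msg() hook
 * and requeue ourselves if more messages are pending.
 */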
static void xe_sched_process_msg_work(struct work_struct *w)
{
	struct xe_gpu_scheduler *sched =
		container_of(w, struct xe_gpu_scheduler, work_process_msg);
	struct xe_sched_msg *msg;

	if (READ_ONCE(sched->base.pause_submit))
		return;

	msg = xe_sched_get_msg(sched);
	if (msg) {
		sched->ops->process_msg(msg);

		xe_sched_process_msg_queue_if_ready(sched);
	}
}

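/*
 * Initialize the Xe scheduler wrapper: set up the message queue and
 * its worker, then initialize the underlying DRM scheduler (the
 * literal 1 below is the number of scheduler run queues).
 */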
int xe_sched_init(struct xe_gpu_scheduler *sched,
		  const struct drm_sched_backend_ops *ops,
		  const struct xe_sched_backend_ops *xe_ops,
		  struct workqueue_struct *submit_wq,
		  uint32_t hw_submission, unsigned int hang_limit,
		  long timeout, struct workqueue_struct *timeout_wq,
		  atomic_t *score, const char *name,
		  struct device *dev)
{
	sched->ops = xe_ops;
	INIT_LIST_HEAD(&sched->msgs);
	INIT_WORK(&sched->work_process_msg, xe_sched_process_msg_work);

	return drm_sched_init(&sched->base, ops, submit_wq, 1, hw_submission,
			      hang_limit, timeout, timeout_wq, score, name,
			      dev);
}

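/*
 * Tear down the scheduler. Submission is stopped first so the message
 * worker cannot run concurrently with, or after, drm_sched_fini().
 */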
void xe_sched_fini(struct xe_gpu_scheduler *sched)
{
	xe_sched_submission_stop(sched);
	drm_sched_fini(&sched->base);
}

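/*
 * Resume the DRM scheduler's submit workqueue and kick the message
 * worker so anything queued while submission was paused gets handled.
 */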
void xe_sched_submission_start(struct xe_gpu_scheduler *sched)
{
	drm_sched_wqueue_start(&sched->base);
	queue_work(sched->base.submit_wq, &sched->work_process_msg);
}

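/*
 * Pause the DRM scheduler's submit workqueue and synchronously cancel
 * any message work already in flight.
 */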
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
{
	drm_sched_wqueue_stop(&sched->base);
	cancel_work_sync(&sched->work_process_msg);
}

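/*
 * Queue a message for asynchronous processing, taking the message
 * lock on behalf of the caller.
 */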
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
		      struct xe_sched_msg *msg)
{
	xe_sched_msg_lock(sched);
	xe_sched_add_msg_locked(sched, msg);
	xe_sched_msg_unlock(sched);
}

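/*
 * As xe_sched_add_msg(), but for callers already holding the message
 * lock, which the lockdep assert below shows is the base scheduler's
 * job_list_lock.
 */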
void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
			     struct xe_sched_msg *msg)
{
	lockdep_assert_held(&sched->base.job_list_lock);

	list_add_tail(&msg->link, &sched->msgs);
	xe_sched_process_msg_queue(sched);
}

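/*
 * Usage sketch, not from the driver itself: a backend provides a
 * process_msg() hook through struct xe_sched_backend_ops and posts
 * messages with xe_sched_add_msg(). The hook signature is inferred
 * from the call in xe_sched_process_msg_work() above; my_process_msg
 * and my_ops are illustrative names only.
 *
 *	static void my_process_msg(struct xe_sched_msg *msg)
 *	{
 *		... interpret and act on the message here ...
 *	}
 *
 *	static const struct xe_sched_backend_ops my_ops = {
 *		.process_msg = my_process_msg,
 *	};
 *
 * Then, after xe_sched_init(sched, &drm_ops, &my_ops, ...), messages
 * posted with xe_sched_add_msg(sched, msg) run asynchronously on the
 * scheduler's submit workqueue.
 */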