// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gpu_scheduler.h"

/* Kick the message worker, unless submission has been stopped. */
static void xe_sched_process_msg_queue(struct xe_gpu_scheduler *sched)
{
	if (!drm_sched_is_stopped(&sched->base))
		queue_work(sched->base.submit_wq, &sched->work_process_msg);
}

/* Requeue the message worker if any messages are still pending. */
static void xe_sched_process_msg_queue_if_ready(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_msg *msg;

	xe_sched_msg_lock(sched);
	msg = list_first_entry_or_null(&sched->msgs, struct xe_sched_msg, link);
	if (msg)
		xe_sched_process_msg_queue(sched);
	xe_sched_msg_unlock(sched);
}

/* Pop the first pending message off the list, or return NULL if empty. */
static struct xe_sched_msg *
xe_sched_get_msg(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_msg *msg;

	xe_sched_msg_lock(sched);
	msg = list_first_entry_or_null(&sched->msgs,
				       struct xe_sched_msg, link);
	if (msg)
		list_del_init(&msg->link);
	xe_sched_msg_unlock(sched);

	return msg;
}

/*
 * Worker that processes one message per invocation and requeues itself if
 * more messages are pending, so a long message stream cannot monopolize the
 * submit workqueue.
 */
static void xe_sched_process_msg_work(struct work_struct *w)
{
	struct xe_gpu_scheduler *sched =
		container_of(w, struct xe_gpu_scheduler, work_process_msg);
	struct xe_sched_msg *msg;

	if (drm_sched_is_stopped(&sched->base))
		return;

	msg = xe_sched_get_msg(sched);
	if (msg) {
		sched->ops->process_msg(msg);

		xe_sched_process_msg_queue_if_ready(sched);
	}
}

int xe_sched_init(struct xe_gpu_scheduler *sched,
		  const struct drm_sched_backend_ops *ops,
		  const struct xe_sched_backend_ops *xe_ops,
		  struct workqueue_struct *submit_wq,
		  uint32_t hw_submission, unsigned hang_limit,
		  long timeout, struct workqueue_struct *timeout_wq,
		  atomic_t *score, const char *name,
		  struct device *dev)
{
	const struct drm_sched_init_args args = {
		.ops = ops,
		.submit_wq = submit_wq,
		.num_rqs = 1,
		.credit_limit = hw_submission,
		.hang_limit = hang_limit,
		.timeout = timeout,
		.timeout_wq = timeout_wq,
		.score = score,
		.name = name,
		.dev = dev,
	};

	sched->ops = xe_ops;
	spin_lock_init(&sched->msg_lock);
	INIT_LIST_HEAD(&sched->msgs);
	INIT_WORK(&sched->work_process_msg, xe_sched_process_msg_work);

	return drm_sched_init(&sched->base, &args);
}

void xe_sched_fini(struct xe_gpu_scheduler *sched)
{
	xe_sched_submission_stop(sched);
	drm_sched_fini(&sched->base);
}

void xe_sched_submission_start(struct xe_gpu_scheduler *sched)
{
	drm_sched_wqueue_start(&sched->base);
	queue_work(sched->base.submit_wq, &sched->work_process_msg);
}

void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
{
	drm_sched_wqueue_stop(&sched->base);
	cancel_work_sync(&sched->work_process_msg);
}

void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched)
{
	drm_sched_resume_timeout(&sched->base, sched->base.timeout);
}

void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
		      struct xe_sched_msg *msg)
{
	xe_sched_msg_lock(sched);
	xe_sched_add_msg_locked(sched, msg);
	xe_sched_msg_unlock(sched);
}

void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
			     struct xe_sched_msg *msg)
{
	lockdep_assert_held(&sched->msg_lock);

	list_add_tail(&msg->link, &sched->msgs);
	xe_sched_process_msg_queue(sched);
}

/**
 * xe_sched_add_msg_head() - Xe GPU scheduler add message to head of list
 * @sched: Xe GPU scheduler
 * @msg: Message to add
 */
void xe_sched_add_msg_head(struct xe_gpu_scheduler *sched,
			   struct xe_sched_msg *msg)
{
	lockdep_assert_held(&sched->msg_lock);

	list_add(&msg->link, &sched->msgs);
	xe_sched_process_msg_queue(sched);
}
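
/*
 * Illustrative sketch, not part of this file: one way a submission backend
 * could consume the message API above. Everything prefixed "example_" and
 * the opcode value are hypothetical names made up for this sketch; only
 * xe_sched_add_msg() and the process_msg hook shape are taken from the code
 * above. The msg->opcode and msg->private_data fields are assumed to be
 * declared for struct xe_sched_msg in xe_gpu_scheduler.h.
 */
#if 0	/* example only, never compiled */
#define EXAMPLE_MSG_CLEANUP	0	/* hypothetical opcode */

static void example_process_msg(struct xe_sched_msg *msg)
{
	/* Runs on sched->base.submit_wq via xe_sched_process_msg_work(). */
	switch (msg->opcode) {
	case EXAMPLE_MSG_CLEANUP:
		/* Tear down whatever state msg->private_data points at. */
		break;
	}
}

static const struct xe_sched_backend_ops example_xe_ops = {
	.process_msg = example_process_msg,
};

static void example_post_cleanup(struct xe_gpu_scheduler *sched,
				 struct xe_sched_msg *msg, void *data)
{
	msg->opcode = EXAMPLE_MSG_CLEANUP;
	msg->private_data = data;
	xe_sched_add_msg(sched, msg);	/* takes msg_lock, kicks the worker */
}
#endif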