xref: /linux/drivers/accel/ivpu/ivpu_jsm_msg.c (revision e7b2b108cdeab76a7e7324459e50b0c1214c0386)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2020-2023 Intel Corporation
4  */
5 
6 #include "ivpu_drv.h"
7 #include "ivpu_hw.h"
8 #include "ivpu_ipc.h"
9 #include "ivpu_jsm_msg.h"
10 
/*
 * ivpu_jsm_msg_type_to_str() - Map a JSM message type to its enum name.
 * @type: JSM IPC message type received from or sent to the VPU firmware.
 *
 * Returns a pointer to a static string with the enumerator's literal name,
 * or "Unknown JSM message type" for values not covered by the switch.
 * Used for human-readable logging of IPC traffic.
 */
const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
{
	/* Expands to: case x: return "x"; — keeps each entry on one line. */
	#define IVPU_CASE_TO_STR(x) case x: return #x
	switch (type) {
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNKNOWN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_ENGINE_RESUME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE_DONE);
	}
	#undef IVPU_CASE_TO_STR

	/* No default case above, so unlisted values fall through to here. */
	return "Unknown JSM message type";
}
91 
92 int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
93 			 u64 jobq_base, u32 jobq_size)
94 {
95 	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
96 	struct vpu_jsm_msg resp;
97 	int ret = 0;
98 
99 	req.payload.register_db.db_idx = db_id;
100 	req.payload.register_db.jobq_base = jobq_base;
101 	req.payload.register_db.jobq_size = jobq_size;
102 	req.payload.register_db.host_ssid = ctx_id;
103 
104 	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
105 				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
106 	if (ret) {
107 		ivpu_err_ratelimited(vdev, "Failed to register doorbell %d: %d\n", db_id, ret);
108 		return ret;
109 	}
110 
111 	ivpu_dbg(vdev, JSM, "Doorbell %d registered to context %d\n", db_id, ctx_id);
112 
113 	return 0;
114 }
115 
116 int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
117 {
118 	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
119 	struct vpu_jsm_msg resp;
120 	int ret = 0;
121 
122 	req.payload.unregister_db.db_idx = db_id;
123 
124 	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
125 				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
126 	if (ret) {
127 		ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %d: %d\n", db_id, ret);
128 		return ret;
129 	}
130 
131 	ivpu_dbg(vdev, JSM, "Doorbell %d unregistered\n", db_id);
132 
133 	return 0;
134 }
135 
136 int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
137 {
138 	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
139 	struct vpu_jsm_msg resp;
140 	int ret;
141 
142 	if (engine > VPU_ENGINE_COPY)
143 		return -EINVAL;
144 
145 	req.payload.query_engine_hb.engine_idx = engine;
146 
147 	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
148 				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
149 	if (ret) {
150 		ivpu_err_ratelimited(vdev, "Failed to get heartbeat from engine %d: %d\n",
151 				     engine, ret);
152 		return ret;
153 	}
154 
155 	*heartbeat = resp.payload.query_engine_hb_done.heartbeat;
156 	return ret;
157 }
158 
159 int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
160 {
161 	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
162 	struct vpu_jsm_msg resp;
163 	int ret;
164 
165 	if (engine > VPU_ENGINE_COPY)
166 		return -EINVAL;
167 
168 	req.payload.engine_reset.engine_idx = engine;
169 
170 	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
171 				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
172 	if (ret)
173 		ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);
174 
175 	return ret;
176 }
177 
178 int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id)
179 {
180 	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT };
181 	struct vpu_jsm_msg resp;
182 	int ret;
183 
184 	if (engine > VPU_ENGINE_COPY)
185 		return -EINVAL;
186 
187 	req.payload.engine_preempt.engine_idx = engine;
188 	req.payload.engine_preempt.preempt_id = preempt_id;
189 
190 	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
191 				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
192 	if (ret)
193 		ivpu_err_ratelimited(vdev, "Failed to preempt engine %d: %d\n", engine, ret);
194 
195 	return ret;
196 }
197 
/*
 * ivpu_jsm_dyndbg_control() - Forward a dynamic-debug command to firmware.
 * @vdev: VPU device
 * @command: command text; assumed NUL-terminated — strscpy() below relies
 *           on the terminator, not on @size
 * @size: length of @command supplied by the caller
 *
 * NOTE(review): @size is currently unused; the copy is bounded only by
 * VPU_DYNDBG_CMD_MAX_LEN. Safe as long as every caller passes a
 * NUL-terminated buffer — confirm against callers before relying on @size.
 *
 * Returns 0 on success or a negative error code on IPC failure.
 */
int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL };
	struct vpu_jsm_msg resp;
	int ret;

	/* Truncates silently if @command exceeds the payload capacity. */
	strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to send command \"%s\": ret %d\n",
				      command, ret);

	return ret;
}
214 
215 int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
216 				  u64 *trace_hw_component_mask)
217 {
218 	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY };
219 	struct vpu_jsm_msg resp;
220 	int ret;
221 
222 	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
223 				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
224 	if (ret) {
225 		ivpu_warn_ratelimited(vdev, "Failed to get trace capability: %d\n", ret);
226 		return ret;
227 	}
228 
229 	*trace_destination_mask = resp.payload.trace_capability.trace_destination_mask;
230 	*trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask;
231 
232 	return ret;
233 }
234 
235 int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
236 			      u64 trace_hw_component_mask)
237 {
238 	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG };
239 	struct vpu_jsm_msg resp;
240 	int ret;
241 
242 	req.payload.trace_config.trace_level = trace_level;
243 	req.payload.trace_config.trace_destination_mask = trace_destination_mask;
244 	req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask;
245 
246 	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
247 				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
248 	if (ret)
249 		ivpu_warn_ratelimited(vdev, "Failed to set config: %d\n", ret);
250 
251 	return ret;
252 }
253 
254 int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
255 {
256 	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
257 	struct vpu_jsm_msg resp;
258 
259 	req.payload.ssid_release.host_ssid = host_ssid;
260 
261 	return ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
262 				     VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
263 }
264 
/*
 * ivpu_jsm_pwr_d0i3_enter() - Tell firmware to prepare for D0i3 entry.
 * @vdev: VPU device
 *
 * Sends the D0i3-enter message on the general command channel while the
 * device is still active, then waits for the HW to report idle.
 * Returns 0 on success (or immediately when the D0i3 message is disabled
 * by workaround), negative error code otherwise.
 */
int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_PWR_D0I3_ENTER };
	struct vpu_jsm_msg resp;
	int ret;

	/* Some HW/FW combinations cannot handle the D0i3 message at all. */
	if (IVPU_WA(disable_d0i3_msg))
		return 0;

	/* Ask FW to acknowledge so we know when it finished preparing. */
	req.payload.pwr_d0i3_enter.send_response = 1;

	/* Uses the _active variant: the send must not trigger a resume. */
	ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE,
					   &resp, VPU_IPC_CHAN_GEN_CMD,
					   vdev->timeout.d0i3_entry_msg);
	if (ret)
		return ret;

	/* FW acknowledged; wait for the HW itself to go idle before D0i3. */
	return ivpu_hw_wait_for_idle(vdev);
}
284