// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"
#include "vpu_jsm_api.h"

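/* Map a JSM message type to its enum name, for logging and debug output. */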
const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
{
	#define IVPU_CASE_TO_STR(x) case x: return #x
	switch (type) {
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNKNOWN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_ENGINE_RESUME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE_DONE);
	}
	#undef IVPU_CASE_TO_STR

	return "Unknown JSM message type";
}

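/*
 * Register doorbell @db_id with the firmware and bind it to the job queue
 * at @jobq_base owned by context @ctx_id.
 */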
int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
			 u64 jobq_base, u32 jobq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret = 0;

	req.payload.register_db.db_idx = db_id;
	req.payload.register_db.jobq_base = jobq_base;
	req.payload.register_db.jobq_size = jobq_size;
	req.payload.register_db.host_ssid = ctx_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);

	return ret;
}

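/* Unregister a previously registered doorbell from the firmware. */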
int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret = 0;

	req.payload.unregister_db.db_idx = db_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %u: %d\n", db_id, ret);

	return ret;
}

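/* Read the heartbeat counter of an engine; only the compute engine is supported. */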
int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.query_engine_hb.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err_ratelimited(vdev, "Failed to get heartbeat from engine %d: %d\n",
				     engine, ret);
		return ret;
	}

	*heartbeat = resp.payload.query_engine_hb_done.heartbeat;
	return ret;
}

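/*
 * Reset the compute engine. A failed reset is escalated to full device
 * recovery.
 */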
int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.engine_reset.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);
		ivpu_pm_trigger_recovery(vdev, "Engine reset failed");
	}

	return ret;
}

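/* Preempt the job currently running on the compute engine. */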
int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.engine_preempt.engine_idx = engine;
	req.payload.engine_preempt.preempt_id = preempt_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to preempt engine %d: %d\n", engine, ret);

	return ret;
}

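/*
 * Pass a dynamic debug command string to the firmware. The command is
 * truncated to VPU_DYNDBG_CMD_MAX_LEN bytes.
 */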
int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL };
	struct vpu_jsm_msg resp;
	int ret;

	strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
				    VPU_IPC_CHAN_GEN_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to send command \"%s\": ret %d\n",
				      command, ret);

	return ret;
}

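/* Query the trace destinations and HW components supported by the firmware. */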
int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
				  u64 *trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY };
	struct vpu_jsm_msg resp;
	int ret;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to get trace capability: %d\n", ret);
		return ret;
	}

	*trace_destination_mask = resp.payload.trace_capability.trace_destination_mask;
	*trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask;

	return ret;
}

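/* Set the firmware trace level and destination/HW component routing masks. */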
int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
			      u64 trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.trace_config.trace_level = trace_level;
	req.payload.trace_config.trace_destination_mask = trace_destination_mask;
	req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set config: %d\n", ret);

	return ret;
}

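/*
 * Notify the firmware that host context @host_ssid is going away so it can
 * release any resources still tied to that SSID.
 */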
int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.ssid_release.host_ssid = host_ssid;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to release context: %d\n", ret);

	return ret;
}

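/*
 * Prepare the firmware for D0i3 entry and wait for the HW to become idle.
 * No-op on platforms with the disable_d0i3_msg workaround.
 */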
int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_PWR_D0I3_ENTER };
	struct vpu_jsm_msg resp;
	int ret;

	if (IVPU_WA(disable_d0i3_msg))
		return 0;

	req.payload.pwr_d0i3_enter.send_response = 1;

	ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE, &resp,
					     VPU_IPC_CHAN_GEN_CMD, vdev->timeout.d0i3_entry_msg);
	if (ret)
		return ret;

	return ivpu_hw_wait_for_idle(vdev);
}

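/* Create a command queue managed by the hardware scheduler (HWS). */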
int ivpu_jsm_hws_create_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_group, u32 cmdq_id,
			     u32 pid, u32 engine, u64 cmdq_base, u32 cmdq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_CREATE_CMD_QUEUE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_create_cmdq.host_ssid = ctx_id;
	req.payload.hws_create_cmdq.process_id = pid;
	req.payload.hws_create_cmdq.engine_idx = engine;
	req.payload.hws_create_cmdq.cmdq_group = cmdq_group;
	req.payload.hws_create_cmdq.cmdq_id = cmdq_id;
	req.payload.hws_create_cmdq.cmdq_base = cmdq_base;
	req.payload.hws_create_cmdq.cmdq_size = cmdq_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to create command queue: %d\n", ret);

	return ret;
}

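/* Destroy a hardware scheduler command queue. */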
int ivpu_jsm_hws_destroy_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DESTROY_CMD_QUEUE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_destroy_cmdq.host_ssid = ctx_id;
	req.payload.hws_destroy_cmdq.cmdq_id = cmdq_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to destroy command queue: %d\n", ret);

	return ret;
}

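/*
 * Register doorbell @db_id for a hardware scheduler command queue. Note that
 * the firmware acknowledges with the generic VPU_JSM_MSG_REGISTER_DB_DONE
 * response.
 */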
int ivpu_jsm_hws_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id, u32 db_id,
			     u64 cmdq_base, u32 cmdq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret = 0;

	req.payload.hws_register_db.db_id = db_id;
	req.payload.hws_register_db.host_ssid = ctx_id;
	req.payload.hws_register_db.cmdq_id = cmdq_id;
	req.payload.hws_register_db.cmdq_base = cmdq_base;
	req.payload.hws_register_db.cmdq_size = cmdq_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);

	return ret;
}

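/*
 * Resume the compute engine under the hardware scheduler. A failed resume is
 * escalated to full device recovery.
 */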
int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_ENGINE_RESUME };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.hws_resume_engine.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);
		ivpu_pm_trigger_recovery(vdev, "Engine resume failed");
	}

	return ret;
}

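/*
 * Set scheduling properties for a command queue. Only the priority band comes
 * from the caller; quantum and grace periods use fixed defaults.
 */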
int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id,
					      u32 priority)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_set_context_sched_properties.host_ssid = ctx_id;
	req.payload.hws_set_context_sched_properties.cmdq_id = cmdq_id;
	req.payload.hws_set_context_sched_properties.priority_band = priority;
	req.payload.hws_set_context_sched_properties.realtime_priority_level = 0;
	req.payload.hws_set_context_sched_properties.in_process_priority = 0;
	req.payload.hws_set_context_sched_properties.context_quantum = 20000;
	req.payload.hws_set_context_sched_properties.grace_period_same_priority = 10000;
	req.payload.hws_set_context_sched_properties.grace_period_lower_priority = 0;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set context sched properties: %d\n", ret);

	return ret;
}

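/*
 * Point the hardware scheduler at a log buffer (VPU virtual address) used for
 * scheduling log notifications.
 */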
int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid,
				    u64 vpu_log_buffer_va)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_set_scheduling_log.engine_idx = engine_idx;
	req.payload.hws_set_scheduling_log.host_ssid = host_ssid;
	req.payload.hws_set_scheduling_log.vpu_log_buffer_va = vpu_log_buffer_va;
	req.payload.hws_set_scheduling_log.notify_index = 0;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set scheduling log: %d\n", ret);

	return ret;
}

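/*
 * Program per-priority-band grace periods and process quantum from the
 * HW-specific defaults kept in vdev->hw->hws.
 */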
int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
	struct vpu_jsm_msg resp;
	struct ivpu_hw_info *hw = vdev->hw;
	struct vpu_ipc_msg_payload_hws_priority_band_setup *setup =
		&req.payload.hws_priority_band_setup;
	int ret;

	for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
	     band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
		setup->grace_period[band] = hw->hws.grace_period[band];
		setup->process_grace_period[band] = hw->hws.process_grace_period[band];
		setup->process_quantum[band] = hw->hws.process_quantum[band];
	}
	setup->normal_band_percentage = 10;

	ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
					     &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set priority bands: %d\n", ret);

	return ret;
}

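/*
 * Start sampling the metric groups in @metric_group_mask into the given
 * buffer at @sampling_rate.
 */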
int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
				   u64 sampling_rate, u64 buffer_addr, u64 buffer_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_START };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_start.sampling_rate = sampling_rate;
	req.payload.metric_streamer_start.buffer_addr = buffer_addr;
	req.payload.metric_streamer_start.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_START_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to start metric streamer: ret %d\n", ret);
		return ret;
	}

	return ret;
}

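/* Stop the metric streamer for the selected metric groups. */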
int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_STOP };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_stop.metric_group_mask = metric_group_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to stop metric streamer: ret %d\n", ret);

	return ret;
}

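/*
 * Collect pending metric samples into @buffer_addr. Fails with -EOVERFLOW if
 * the firmware reports more bytes written than the buffer can hold.
 */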
int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
				    u64 buffer_addr, u64 buffer_size, u64 *bytes_written)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_UPDATE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_update.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_update.buffer_addr = buffer_addr;
	req.payload.metric_streamer_update.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to update metric streamer: ret %d\n", ret);
		return ret;
	}

	if (buffer_size && resp.payload.metric_streamer_done.bytes_written > buffer_size) {
		ivpu_warn_ratelimited(vdev, "MS buffer overflow: bytes_written %#llx > buffer_size %#llx\n",
				      resp.payload.metric_streamer_done.bytes_written, buffer_size);
		return -EOVERFLOW;
	}

	*bytes_written = resp.payload.metric_streamer_done.bytes_written;

	return ret;
}

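/*
 * Query sample size and metadata size for the selected metric groups. The
 * request reuses the metric_streamer_start payload layout.
 */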
int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
				  u64 buffer_size, u32 *sample_size, u64 *info_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_INFO };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_start.buffer_addr = buffer_addr;
	req.payload.metric_streamer_start.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to get metric streamer info: ret %d\n", ret);
		return ret;
	}

	if (!resp.payload.metric_streamer_done.sample_size) {
		ivpu_warn_ratelimited(vdev, "Invalid sample size\n");
		return -EBADMSG;
	}

	if (sample_size)
		*sample_size = resp.payload.metric_streamer_done.sample_size;
	if (info_size)
		*info_size = resp.payload.metric_streamer_done.bytes_written;

	return ret;
}

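/*
 * Enable duty cycle throttling (DCT) with the given active/inactive times
 * in microseconds.
 */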
int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_ENABLE };
	struct vpu_jsm_msg resp;

	req.payload.pwr_dct_control.dct_active_us = active_us;
	req.payload.pwr_dct_control.dct_inactive_us = inactive_us;

	return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE, &resp,
					      VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}

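/* Disable duty cycle throttling. */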
int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_DISABLE };
	struct vpu_jsm_msg resp;

	return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE, &resp,
					      VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}

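/*
 * Ask the firmware to dump its state. Unlike the other helpers, the response
 * is not consumed here.
 */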
int ivpu_jsm_state_dump(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_STATE_DUMP };

	return ivpu_ipc_send_and_wait(vdev, &req, VPU_IPC_CHAN_ASYNC_CMD,
				      vdev->timeout.state_dump_msg);
}
563