// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
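
/*
 * The JSM helpers below (everything after the string helper) follow the same
 * request/response pattern: fill in a struct vpu_jsm_msg of the given type,
 * hand it to the IPC layer with ivpu_ipc_send_receive() (or its _active
 * variant), and block until the matching *_DONE / *_RSP message arrives or
 * the timeout (usually vdev->timeout.jsm) expires.
 */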
const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
{
	#define IVPU_CASE_TO_STR(x) case x: return #x
	switch (type) {
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNKNOWN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_ENGINE_RESUME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE_DONE);
	}
	#undef IVPU_CASE_TO_STR

	return "Unknown JSM message type";
}
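
/*
 * Unknown enum values fall through the switch to the fallback string, so the
 * result is always printable and never NULL. A minimal usage sketch
 * (hypothetical caller, not part of this file):
 *
 *	ivpu_dbg(vdev, JSM, "RX: %s\n", ivpu_jsm_msg_type_to_str(resp.type));
 */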

int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
			 u64 jobq_base, u32 jobq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.register_db.db_idx = db_id;
	req.payload.register_db.jobq_base = jobq_base;
	req.payload.register_db.jobq_size = jobq_size;
	req.payload.register_db.host_ssid = ctx_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err_ratelimited(vdev, "Failed to register doorbell %d: %d\n", db_id, ret);
		return ret;
	}

	ivpu_dbg(vdev, JSM, "Doorbell %d registered to context %d\n", db_id, ctx_id);

	return 0;
}
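
/*
 * Registering only tells firmware that db_id maps to the job queue at
 * jobq_base for the given host context (SSID); submitting work by actually
 * ringing the doorbell is expected to happen elsewhere, outside this file.
 */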

int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.unregister_db.db_idx = db_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %d: %d\n", db_id, ret);
		return ret;
	}

	ivpu_dbg(vdev, JSM, "Doorbell %d unregistered\n", db_id);

	return 0;
}

int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.query_engine_hb.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err_ratelimited(vdev, "Failed to get heartbeat from engine %d: %d\n",
				     engine, ret);
		return ret;
	}

	*heartbeat = resp.payload.query_engine_hb_done.heartbeat;
	return ret;
}
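
/*
 * A hedged hang-detection sketch built on the heartbeat (hypothetical; the
 * engine choice and delay are illustrative, not taken from this driver):
 * sample the counter twice and treat an unchanged value as a stuck engine.
 *
 *	u64 hb1, hb2;
 *
 *	if (!ivpu_jsm_get_heartbeat(vdev, VPU_ENGINE_COPY, &hb1)) {
 *		msleep(100);
 *		if (!ivpu_jsm_get_heartbeat(vdev, VPU_ENGINE_COPY, &hb2) &&
 *		    hb1 == hb2)
 *			ivpu_jsm_reset_engine(vdev, VPU_ENGINE_COPY);
 *	}
 */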

int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.engine_reset.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);

	return ret;
}

int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.engine_preempt.engine_idx = engine;
	req.payload.engine_preempt.preempt_id = preempt_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to preempt engine %d: %d\n", engine, ret);

	return ret;
}

int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL };
	struct vpu_jsm_msg resp;
	int ret;

	strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to send command \"%s\": ret %d\n",
				      command, ret);

	return ret;
}
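
/*
 * Note: the size argument above is currently unused; strscpy() bounds the
 * copy by VPU_DYNDBG_CMD_MAX_LEN instead, silently truncating longer
 * commands while keeping the string NUL-terminated for the firmware.
 */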

int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
				  u64 *trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY };
	struct vpu_jsm_msg resp;
	int ret;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to get trace capability: %d\n", ret);
		return ret;
	}

	*trace_destination_mask = resp.payload.trace_capability.trace_destination_mask;
	*trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask;

	return ret;
}

int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
			      u64 trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.trace_config.trace_level = trace_level;
	req.payload.trace_config.trace_destination_mask = trace_destination_mask;
	req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set trace config: %d\n", ret);

	return ret;
}
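
/*
 * A plausible configuration flow (sketch only; the policy lives in the
 * callers, and requested_* / level are hypothetical): fetch the capability
 * masks first and drop any bits the firmware did not advertise before
 * applying the configuration.
 *
 *	u32 dest_mask;
 *	u64 hw_mask;
 *
 *	if (!ivpu_jsm_trace_get_capability(vdev, &dest_mask, &hw_mask)) {
 *		dest_mask &= requested_dest_mask;
 *		hw_mask &= requested_hw_mask;
 *		ivpu_jsm_trace_set_config(vdev, level, dest_mask, hw_mask);
 *	}
 */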

int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
	struct vpu_jsm_msg resp;

	req.payload.ssid_release.host_ssid = host_ssid;

	return ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
				     VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}

int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_PWR_D0I3_ENTER };
	struct vpu_jsm_msg resp;
	int ret;

	if (IVPU_WA(disable_d0i3_msg))
		return 0;

	req.payload.pwr_d0i3_enter.send_response = 1;

	ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE,
					   &resp, VPU_IPC_CHAN_GEN_CMD,
					   vdev->timeout.d0i3_entry_msg);
	if (ret)
		return ret;

	return ivpu_hw_wait_for_idle(vdev);
}
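
/*
 * send_response = 1 explicitly asks firmware to emit the _DONE message.
 * The _active send variant is presumably required because this runs on the
 * power-down path, where the regular helper's power handling would get in
 * the way. With the disable_d0i3_msg workaround set, the exchange (and the
 * idle wait) is skipped entirely.
 */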

int ivpu_jsm_hws_create_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_group, u32 cmdq_id,
			     u32 pid, u32 engine, u64 cmdq_base, u32 cmdq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_CREATE_CMD_QUEUE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_create_cmdq.host_ssid = ctx_id;
	req.payload.hws_create_cmdq.process_id = pid;
	req.payload.hws_create_cmdq.engine_idx = engine;
	req.payload.hws_create_cmdq.cmdq_group = cmdq_group;
	req.payload.hws_create_cmdq.cmdq_id = cmdq_id;
	req.payload.hws_create_cmdq.cmdq_base = cmdq_base;
	req.payload.hws_create_cmdq.cmdq_size = cmdq_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to create command queue: %d\n", ret);

	return ret;
}

int ivpu_jsm_hws_destroy_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DESTROY_CMD_QUEUE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_destroy_cmdq.host_ssid = ctx_id;
	req.payload.hws_destroy_cmdq.cmdq_id = cmdq_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to destroy command queue: %d\n", ret);

	return ret;
}

int ivpu_jsm_hws_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id, u32 db_id,
			     u64 cmdq_base, u32 cmdq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_register_db.db_id = db_id;
	req.payload.hws_register_db.host_ssid = ctx_id;
	req.payload.hws_register_db.cmdq_id = cmdq_id;
	req.payload.hws_register_db.cmdq_base = cmdq_base;
	req.payload.hws_register_db.cmdq_size = cmdq_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);

	return ret;
}
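
/*
 * Note that the HWS variant above completes with VPU_JSM_MSG_REGISTER_DB_DONE,
 * the same response type as the plain ivpu_jsm_register_db(); only the
 * request type and payload (a cmdq_id instead of a raw job queue) differ.
 */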

int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_ENGINE_RESUME };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine >= VPU_ENGINE_NB)
		return -EINVAL;

	req.payload.hws_resume_engine.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);

	return ret;
}

int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id,
					      u32 priority)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_set_context_sched_properties.host_ssid = ctx_id;
	req.payload.hws_set_context_sched_properties.cmdq_id = cmdq_id;
	req.payload.hws_set_context_sched_properties.priority_band = priority;
	req.payload.hws_set_context_sched_properties.realtime_priority_level = 0;
	req.payload.hws_set_context_sched_properties.in_process_priority = 0;
	req.payload.hws_set_context_sched_properties.context_quantum = 20000;
	req.payload.hws_set_context_sched_properties.grace_period_same_priority = 10000;
	req.payload.hws_set_context_sched_properties.grace_period_lower_priority = 0;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set context sched properties: %d\n", ret);

	return ret;
}

int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid,
				    u64 vpu_log_buffer_va)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_set_scheduling_log.engine_idx = engine_idx;
	req.payload.hws_set_scheduling_log.host_ssid = host_ssid;
	req.payload.hws_set_scheduling_log.vpu_log_buffer_va = vpu_log_buffer_va;
	req.payload.hws_set_scheduling_log.notify_index = 0;
	req.payload.hws_set_scheduling_log.enable_extra_events =
		ivpu_test_mode & IVPU_TEST_MODE_HWS_EXTRA_EVENTS;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set scheduling log: %d\n", ret);

	return ret;
}

int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
	struct vpu_jsm_msg resp;
	int ret;

	/* Idle */
	req.payload.hws_priority_band_setup.grace_period[0] = 0;
	req.payload.hws_priority_band_setup.process_grace_period[0] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[0] = 160000;
	/* Normal */
	req.payload.hws_priority_band_setup.grace_period[1] = 50000;
	req.payload.hws_priority_band_setup.process_grace_period[1] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[1] = 300000;
	/* Focus */
	req.payload.hws_priority_band_setup.grace_period[2] = 50000;
	req.payload.hws_priority_band_setup.process_grace_period[2] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[2] = 200000;
	/* Realtime */
	req.payload.hws_priority_band_setup.grace_period[3] = 0;
	req.payload.hws_priority_band_setup.process_grace_period[3] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[3] = 200000;

	req.payload.hws_priority_band_setup.normal_band_percentage = 10;

	ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
					   &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set priority bands: %d\n", ret);

	return ret;
}
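
/*
 * The four bands above (0 = idle, 1 = normal, 2 = focus, 3 = realtime) are
 * presumably the same values callers pass as the priority argument to
 * ivpu_jsm_hws_set_context_sched_properties(). The quantum and grace-period
 * numbers are hard-coded defaults; their unit is firmware-defined and not
 * spelled out here (microseconds would be a plausible guess).
 */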

int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
				   u64 sampling_rate, u64 buffer_addr, u64 buffer_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_START };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_start.sampling_rate = sampling_rate;
	req.payload.metric_streamer_start.buffer_addr = buffer_addr;
	req.payload.metric_streamer_start.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_START_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to start metric streamer: ret %d\n", ret);

	return ret;
}

int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_STOP };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_stop.metric_group_mask = metric_group_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to stop metric streamer: ret %d\n", ret);

	return ret;
}

int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
				    u64 buffer_addr, u64 buffer_size, u64 *bytes_written)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_UPDATE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_update.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_update.buffer_addr = buffer_addr;
	req.payload.metric_streamer_update.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to update metric streamer: ret %d\n", ret);
		return ret;
	}

	if (buffer_size && resp.payload.metric_streamer_done.bytes_written > buffer_size) {
		ivpu_warn_ratelimited(vdev, "MS buffer overflow: bytes_written %#llx > buffer_size %#llx\n",
				      resp.payload.metric_streamer_done.bytes_written, buffer_size);
		return -EOVERFLOW;
	}

	*bytes_written = resp.payload.metric_streamer_done.bytes_written;

	return ret;
}
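
/*
 * bytes_written > buffer_size is treated as firmware misbehaviour: the data
 * past the end of the buffer cannot be trusted, so -EOVERFLOW is returned
 * instead of a short count. A zero buffer_size skips the check, which a
 * caller can presumably use to query how much data is pending without
 * draining it.
 */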

int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
				  u64 buffer_size, u32 *sample_size, u64 *info_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_INFO };
	struct vpu_jsm_msg resp;
	int ret;

	/* The INFO request reuses the metric_streamer_start payload layout. */
	req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_start.buffer_addr = buffer_addr;
	req.payload.metric_streamer_start.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to get metric streamer info: ret %d\n", ret);
		return ret;
	}

	if (!resp.payload.metric_streamer_done.sample_size) {
		ivpu_warn_ratelimited(vdev, "Invalid sample size\n");
		return -EBADMSG;
	}

	if (sample_size)
		*sample_size = resp.payload.metric_streamer_done.sample_size;
	if (info_size)
		*info_size = resp.payload.metric_streamer_done.bytes_written;

	return ret;
}
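
/*
 * A typical discovery flow (sketch; mask, rate, buf_addr and n_samples are
 * hypothetical caller-side values): probe with a zero-sized buffer to
 * validate the group mask and learn sample_size, then size the stream
 * buffer from it before starting the streamer.
 *
 *	u32 sample_size;
 *
 *	ret = ivpu_jsm_metric_streamer_info(vdev, mask, 0, 0, &sample_size, NULL);
 *	if (!ret)
 *		ret = ivpu_jsm_metric_streamer_start(vdev, mask, rate, buf_addr,
 *						     n_samples * sample_size);
 */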