// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"

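/*
 * Map a JSM message type to its enum name for use in log messages.
 * IVPU_CASE_TO_STR(x) expands to "case x: return #x", so for example
 * ivpu_jsm_msg_type_to_str(VPU_JSM_MSG_JOB_DONE) returns the string
 * "VPU_JSM_MSG_JOB_DONE".
 */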
const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
{
	#define IVPU_CASE_TO_STR(x) case x: return #x
	switch (type) {
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNKNOWN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_ENGINE_RESUME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE_DONE);
	}
	#undef IVPU_CASE_TO_STR

	return "Unknown JSM message type";
}

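/*
 * Register doorbell @db_id with the firmware and bind it to the job queue
 * at @jobq_base (@jobq_size bytes) owned by host context @ctx_id. A
 * hypothetical call site, with illustrative variable names only:
 *
 *	ret = ivpu_jsm_register_db(vdev, ctx_id, db_id, cmdq_vpu_addr, cmdq_size);
 *	if (ret)
 *		return ret;
 */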
int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
			 u64 jobq_base, u32 jobq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret = 0;

	req.payload.register_db.db_idx = db_id;
	req.payload.register_db.jobq_base = jobq_base;
	req.payload.register_db.jobq_size = jobq_size;
	req.payload.register_db.host_ssid = ctx_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);

	return ret;
}

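/*
 * Detach doorbell @db_id from its job queue via VPU_JSM_MSG_UNREGISTER_DB.
 * A failure here is only logged as a warning, unlike registration, which
 * logs an error.
 */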
int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret = 0;

	req.payload.unregister_db.db_idx = db_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %u: %d\n", db_id, ret);

	return ret;
}

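/*
 * Query the heartbeat counter of @engine and store it in @heartbeat on
 * success. The engine index is validated against VPU_ENGINE_COPY before
 * any IPC traffic is generated.
 */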
int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.query_engine_hb.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err_ratelimited(vdev, "Failed to get heartbeat from engine %d: %d\n",
				     engine, ret);
		return ret;
	}

	*heartbeat = resp.payload.query_engine_hb_done.heartbeat;
	return ret;
}

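/*
 * Request a firmware-side reset of @engine. The preemption variant below
 * additionally carries a @preempt_id identifying the preemption request.
 */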
int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.engine_reset.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);

	return ret;
}

int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.engine_preempt.engine_idx = engine;
	req.payload.engine_preempt.preempt_id = preempt_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to preempt engine %d: %d\n", engine, ret);

	return ret;
}

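/*
 * Forward a dynamic-debug command string to the firmware. strscpy()
 * copies at most VPU_DYNDBG_CMD_MAX_LEN - 1 characters of @command and
 * always NUL-terminates, so over-long commands are silently truncated.
 */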
int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL };
	struct vpu_jsm_msg resp;
	int ret;

	strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to send command \"%s\": ret %d\n",
				      command, ret);

	return ret;
}

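/*
 * Read back which trace destinations and hardware components the firmware
 * supports. Both masks are output parameters and are written only on
 * success. The trace config setter below follows the same request/response
 * pattern.
 */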
int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
				  u64 *trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY };
	struct vpu_jsm_msg resp;
	int ret;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to get trace capability: %d\n", ret);
		return ret;
	}

	*trace_destination_mask = resp.payload.trace_capability.trace_destination_mask;
	*trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask;

	return ret;
}

int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
			      u64 trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.trace_config.trace_level = trace_level;
	req.payload.trace_config.trace_destination_mask = trace_destination_mask;
	req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set trace config: %d\n", ret);

	return ret;
}

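/*
 * Notify the firmware that host context (SSID) @host_ssid is being
 * released so it can drop any state tied to that context.
 */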
int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.ssid_release.host_ssid = host_ssid;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to release context: %d\n", ret);

	return ret;
}

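/*
 * Ask the firmware to prepare for D0i3 entry. The call is skipped when the
 * disable_d0i3_msg workaround is active, uses the _active send variant on
 * the general command channel with the dedicated d0i3_entry_msg timeout,
 * and on success waits for the hardware to report idle.
 */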
int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_PWR_D0I3_ENTER };
	struct vpu_jsm_msg resp;
	int ret;

	if (IVPU_WA(disable_d0i3_msg))
		return 0;

	req.payload.pwr_d0i3_enter.send_response = 1;

	ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE,
					   &resp, VPU_IPC_CHAN_GEN_CMD,
					   vdev->timeout.d0i3_entry_msg);
	if (ret)
		return ret;

	return ivpu_hw_wait_for_idle(vdev);
}

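/*
 * Hardware scheduler (HWS) command queue setup: create a command queue
 * described by @cmdq_base and @cmdq_size in group @cmdq_group for host
 * context @ctx_id and process @pid. The destroy counterpart below only
 * needs the context and queue IDs.
 */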
int ivpu_jsm_hws_create_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_group, u32 cmdq_id,
			     u32 pid, u32 engine, u64 cmdq_base, u32 cmdq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_CREATE_CMD_QUEUE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_create_cmdq.host_ssid = ctx_id;
	req.payload.hws_create_cmdq.process_id = pid;
	req.payload.hws_create_cmdq.engine_idx = engine;
	req.payload.hws_create_cmdq.cmdq_group = cmdq_group;
	req.payload.hws_create_cmdq.cmdq_id = cmdq_id;
	req.payload.hws_create_cmdq.cmdq_base = cmdq_base;
	req.payload.hws_create_cmdq.cmdq_size = cmdq_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to create command queue: %d\n", ret);

	return ret;
}

int ivpu_jsm_hws_destroy_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DESTROY_CMD_QUEUE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_destroy_cmdq.host_ssid = ctx_id;
	req.payload.hws_destroy_cmdq.cmdq_id = cmdq_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to destroy command queue: %d\n", ret);

	return ret;
}

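/*
 * HWS doorbell registration. Note that the firmware acknowledges with the
 * generic VPU_JSM_MSG_REGISTER_DB_DONE, not an HWS-specific response.
 */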
int ivpu_jsm_hws_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id, u32 db_id,
			     u64 cmdq_base, u32 cmdq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret = 0;

	req.payload.hws_register_db.db_id = db_id;
	req.payload.hws_register_db.host_ssid = ctx_id;
	req.payload.hws_register_db.cmdq_id = cmdq_id;
	req.payload.hws_register_db.cmdq_base = cmdq_base;
	req.payload.hws_register_db.cmdq_size = cmdq_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);

	return ret;
}

int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_ENGINE_RESUME };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine >= VPU_ENGINE_NB)
		return -EINVAL;

	req.payload.hws_resume_engine.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);

	return ret;
}

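/*
 * Set scheduling properties for one command queue. Everything except the
 * priority band is currently hardcoded; the quantum and grace-period
 * values appear to use the same unit as the priority band setup below,
 * though the unit is not spelled out in this file.
 */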
int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id,
					      u32 priority)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_set_context_sched_properties.host_ssid = ctx_id;
	req.payload.hws_set_context_sched_properties.cmdq_id = cmdq_id;
	req.payload.hws_set_context_sched_properties.priority_band = priority;
	req.payload.hws_set_context_sched_properties.realtime_priority_level = 0;
	req.payload.hws_set_context_sched_properties.in_process_priority = 0;
	req.payload.hws_set_context_sched_properties.context_quantum = 20000;
	req.payload.hws_set_context_sched_properties.grace_period_same_priority = 10000;
	req.payload.hws_set_context_sched_properties.grace_period_lower_priority = 0;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set context sched properties: %d\n", ret);

	return ret;
}

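/*
 * Point the firmware at a scheduling log buffer located at VPU address
 * @vpu_log_buffer_va. Extra log events are enabled only when the
 * IVPU_TEST_MODE_HWS_EXTRA_EVENTS test-mode bit is set.
 */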
int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid,
				    u64 vpu_log_buffer_va)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_set_scheduling_log.engine_idx = engine_idx;
	req.payload.hws_set_scheduling_log.host_ssid = host_ssid;
	req.payload.hws_set_scheduling_log.vpu_log_buffer_va = vpu_log_buffer_va;
	req.payload.hws_set_scheduling_log.notify_index = 0;
	req.payload.hws_set_scheduling_log.enable_extra_events =
		ivpu_test_mode & IVPU_TEST_MODE_HWS_EXTRA_EVENTS;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set scheduling log: %d\n", ret);

	return ret;
}

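/*
 * Configure the four HWS priority bands (idle, normal, focus, realtime)
 * with hardcoded grace periods and process quanta; the inline comments
 * below name the band each array index refers to.
 */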
int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
	struct vpu_jsm_msg resp;
	int ret;

	/* Idle */
	req.payload.hws_priority_band_setup.grace_period[0] = 0;
	req.payload.hws_priority_band_setup.process_grace_period[0] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[0] = 160000;
	/* Normal */
	req.payload.hws_priority_band_setup.grace_period[1] = 50000;
	req.payload.hws_priority_band_setup.process_grace_period[1] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[1] = 300000;
	/* Focus */
	req.payload.hws_priority_band_setup.grace_period[2] = 50000;
	req.payload.hws_priority_band_setup.process_grace_period[2] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[2] = 200000;
	/* Realtime */
	req.payload.hws_priority_band_setup.grace_period[3] = 0;
	req.payload.hws_priority_band_setup.process_grace_period[3] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[3] = 200000;

	req.payload.hws_priority_band_setup.normal_band_percentage = 10;

	ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
					   &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set priority bands: %d\n", ret);

	return ret;
}

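/*
 * Start the metric streamer for the groups selected by @metric_group_mask,
 * sampling every @sampling_rate into the buffer at @buffer_addr. A
 * hypothetical call, with illustrative values (BIT_ULL(0) selecting the
 * first metric group):
 *
 *	ret = ivpu_jsm_metric_streamer_start(vdev, BIT_ULL(0), sampling_rate,
 *					     buf_vpu_addr, buf_size);
 */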
int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
				   u64 sampling_rate, u64 buffer_addr, u64 buffer_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_START };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_start.sampling_rate = sampling_rate;
	req.payload.metric_streamer_start.buffer_addr = buffer_addr;
	req.payload.metric_streamer_start.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_START_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to start metric streamer: ret %d\n", ret);

	return ret;
}

int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_STOP };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_stop.metric_group_mask = metric_group_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to stop metric streamer: ret %d\n", ret);

	return ret;
}

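/*
 * Drain the metric streamer into the buffer at @buffer_addr. If the
 * firmware reports more bytes written than @buffer_size allows, the call
 * fails with -EOVERFLOW; the check is skipped when @buffer_size is 0.
 */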
int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
				    u64 buffer_addr, u64 buffer_size, u64 *bytes_written)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_UPDATE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_update.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_update.buffer_addr = buffer_addr;
	req.payload.metric_streamer_update.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to update metric streamer: ret %d\n", ret);
		return ret;
	}

	if (buffer_size && resp.payload.metric_streamer_done.bytes_written > buffer_size) {
		ivpu_warn_ratelimited(vdev, "MS buffer overflow: bytes_written %#llx > buffer_size %#llx\n",
				      resp.payload.metric_streamer_done.bytes_written, buffer_size);
		return -EOVERFLOW;
	}

	*bytes_written = resp.payload.metric_streamer_done.bytes_written;

	return ret;
}

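/*
 * Query the sample size and info size for the given metric groups. The
 * request reuses the metric_streamer_start payload layout; both output
 * parameters are optional, and a zero sample size from the firmware is
 * treated as a malformed response (-EBADMSG).
 */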
int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
				  u64 buffer_size, u32 *sample_size, u64 *info_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_INFO };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_start.buffer_addr = buffer_addr;
	req.payload.metric_streamer_start.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to get metric streamer info: ret %d\n", ret);
		return ret;
	}

	if (!resp.payload.metric_streamer_done.sample_size) {
		ivpu_warn_ratelimited(vdev, "Invalid sample size\n");
		return -EBADMSG;
	}

	if (sample_size)
		*sample_size = resp.payload.metric_streamer_done.sample_size;
	if (info_size)
		*info_size = resp.payload.metric_streamer_done.bytes_written;

	return ret;
}

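/*
 * Enable duty-cycle throttling: the NPU alternates between @active_us of
 * activity and @inactive_us of idle time. Both DCT helpers use the
 * _active send variant and return the IPC status directly, without any
 * extra logging.
 */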
int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_ENABLE };
	struct vpu_jsm_msg resp;

	req.payload.pwr_dct_control.dct_active_us = active_us;
	req.payload.pwr_dct_control.dct_inactive_us = inactive_us;

	return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE,
					    &resp, VPU_IPC_CHAN_ASYNC_CMD,
					    vdev->timeout.jsm);
}

int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_DISABLE };
	struct vpu_jsm_msg resp;

	return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE,
					    &resp, VPU_IPC_CHAN_ASYNC_CMD,
					    vdev->timeout.jsm);
}