// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"

const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
{
	#define IVPU_CASE_TO_STR(x) case x: return #x
	switch (type) {
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNKNOWN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL_RSP);
	}
	#undef IVPU_CASE_TO_STR

	return "Unknown JSM message type";
}

int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
			 u64 jobq_base, u32 jobq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret = 0;

	req.payload.register_db.db_idx = db_id;
	req.payload.register_db.jobq_base = jobq_base;
	req.payload.register_db.jobq_size = jobq_size;
	req.payload.register_db.host_ssid = ctx_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err_ratelimited(vdev, "Failed to register doorbell %d: %d\n", db_id, ret);
		return ret;
	}

	ivpu_dbg(vdev, JSM, "Doorbell %d registered to context %d\n", db_id, ctx_id);

	return 0;
}

int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret = 0;

	req.payload.unregister_db.db_idx = db_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %d: %d\n", db_id, ret);
		return ret;
	}

	ivpu_dbg(vdev, JSM, "Doorbell %d unregistered\n", db_id);

	return 0;
}

int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.query_engine_hb.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err_ratelimited(vdev, "Failed to get heartbeat from engine %d: %d\n",
				     engine, ret);
		return ret;
	}

	*heartbeat = resp.payload.query_engine_hb_done.heartbeat;
	return ret;
}

int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.engine_reset.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);

	return ret;
}

int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.engine_preempt.engine_idx = engine;
	req.payload.engine_preempt.preempt_id = preempt_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to preempt engine %d: %d\n", engine, ret);

	return ret;
}

int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL };
	struct vpu_jsm_msg resp;
	int ret;

	strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to send command \"%s\": ret %d\n",
				      command, ret);

	return ret;
}

int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
				  u64 *trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY };
	struct vpu_jsm_msg resp;
	int ret;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to get trace capability: %d\n", ret);
		return ret;
	}

	*trace_destination_mask = resp.payload.trace_capability.trace_destination_mask;
	*trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask;

	return ret;
}

int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
			      u64 trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.trace_config.trace_level = trace_level;
	req.payload.trace_config.trace_destination_mask = trace_destination_mask;
	req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set config: %d\n", ret);

	return ret;
}

int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
	struct vpu_jsm_msg resp;

	req.payload.ssid_release.host_ssid = host_ssid;

	return ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
				     VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}
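
/*
 * Illustrative usage sketch (not part of the driver): a hypothetical caller
 * that has already placed a job queue in VPU-visible memory could register
 * its doorbell, check an engine heartbeat, and clean up roughly as follows.
 * The names file_priv, jobq_vpu_addr, jobq_size and db_id are placeholders
 * for whatever the real caller uses; the engine constant is assumed to come
 * from the driver headers alongside VPU_ENGINE_COPY.
 *
 *	u64 heartbeat;
 *	int ret;
 *
 *	ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, db_id,
 *				   jobq_vpu_addr, jobq_size);
 *	if (ret)
 *		return ret;
 *
 *	ret = ivpu_jsm_get_heartbeat(vdev, VPU_ENGINE_COMPUTE, &heartbeat);
 *	if (ret)
 *		ivpu_jsm_reset_engine(vdev, VPU_ENGINE_COMPUTE);
 *
 *	ivpu_jsm_unregister_db(vdev, db_id);
 */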