// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include "vpu.h"
#include "vpu_defs.h"
#include "vpu_cmds.h"
#include "vpu_rpc.h"
#include "vpu_mbox.h"

struct vpu_cmd_request {
	u32 request;
	u32 response;
	u32 handled;
};

struct vpu_cmd_t {
	struct list_head list;
	u32 id;
	struct vpu_cmd_request *request;
	struct vpu_rpc_event *pkt;
	unsigned long key;
	atomic_long_t *last_response_cmd;
};

/* commands that expect a paired response message; all others are fire-and-forget */
static struct vpu_cmd_request vpu_cmd_requests[] = {
	{
		.request = VPU_CMD_ID_CONFIGURE_CODEC,
		.response = VPU_MSG_ID_MEM_REQUEST,
		.handled = 1,
	},
	{
		.request = VPU_CMD_ID_START,
		.response = VPU_MSG_ID_START_DONE,
		.handled = 0,
	},
	{
		.request = VPU_CMD_ID_STOP,
		.response = VPU_MSG_ID_STOP_DONE,
		.handled = 0,
	},
	{
		.request = VPU_CMD_ID_ABORT,
		.response = VPU_MSG_ID_ABORT_DONE,
		.handled = 0,
	},
	{
		.request = VPU_CMD_ID_RST_BUF,
		.response = VPU_MSG_ID_BUF_RST,
		.handled = 1,
	},
};

static int vpu_cmd_send(struct vpu_core *core, struct vpu_rpc_event *pkt)
{
	int ret = 0;

	ret = vpu_iface_send_cmd(core, pkt);
	if (ret)
		return ret;

	/* write the cmd data to the cmd buffer before triggering a cmd interrupt */
	mb();
	vpu_mbox_send_type(core, COMMAND);

	return ret;
}

static struct vpu_cmd_t *vpu_alloc_cmd(struct vpu_inst *inst, u32 id, void *data)
{
	struct vpu_cmd_t *cmd;
	int i;
	int ret;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->pkt = kzalloc(sizeof(*cmd->pkt), GFP_KERNEL);
	if (!cmd->pkt) {
		kfree(cmd);
		return NULL;
	}

	cmd->id = id;
	ret = vpu_iface_pack_cmd(inst->core, cmd->pkt, inst->id, id, data);
	if (ret) {
		dev_err(inst->dev, "iface pack cmd %s fail\n", vpu_id_name(id));
		kfree(cmd->pkt);
		kfree(cmd);
		return NULL;
	}
	for (i = 0; i < ARRAY_SIZE(vpu_cmd_requests); i++) {
		if (vpu_cmd_requests[i].request == id) {
			cmd->request = &vpu_cmd_requests[i];
			break;
		}
	}

	return cmd;
}

static void vpu_free_cmd(struct vpu_cmd_t *cmd)
{
	if (!cmd)
		return;
	if (cmd->last_response_cmd)
		atomic_long_set(cmd->last_response_cmd, cmd->key);
	kfree(cmd->pkt);
	kfree(cmd);
}

static int vpu_session_process_cmd(struct vpu_inst *inst, struct vpu_cmd_t *cmd)
{
	int ret;

	dev_dbg(inst->dev, "[%d]send cmd %s\n", inst->id, vpu_id_name(cmd->id));
	vpu_iface_pre_send_cmd(inst);
	ret = vpu_cmd_send(inst->core, cmd->pkt);
	if (!ret) {
		vpu_iface_post_send_cmd(inst);
		vpu_inst_record_flow(inst, cmd->id);
	} else {
		dev_err(inst->dev, "[%d] iface send cmd %s fail\n", inst->id, vpu_id_name(cmd->id));
	}

	return ret;
}

static void vpu_process_cmd_request(struct vpu_inst *inst)
{
	struct vpu_cmd_t *cmd;
	struct vpu_cmd_t *tmp;

	if (!inst || inst->pending)
		return;

	list_for_each_entry_safe(cmd, tmp, &inst->cmd_q, list) {
		list_del_init(&cmd->list);
		if (vpu_session_process_cmd(inst, cmd))
			dev_err(inst->dev, "[%d] process cmd %s fail\n",
				inst->id, vpu_id_name(cmd->id));
		if (cmd->request) {
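			/*
			 * A command with a paired response stalls the queue:
			 * it stays pending until vpu_response_cmd() sees the
			 * matching firmware message and clears it.
			 */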
			inst->pending = (void *)cmd;
			break;
		}
		vpu_free_cmd(cmd);
	}
}

static int vpu_request_cmd(struct vpu_inst *inst, u32 id, void *data,
			   unsigned long *key, int *sync)
{
	struct vpu_core *core;
	struct vpu_cmd_t *cmd;

	if (!inst || !inst->core)
		return -EINVAL;

	core = inst->core;
	cmd = vpu_alloc_cmd(inst, id, data);
	if (!cmd)
		return -ENOMEM;

	mutex_lock(&core->cmd_lock);
	cmd->key = ++inst->cmd_seq;
	cmd->last_response_cmd = &inst->last_response_cmd;
	if (key)
		*key = cmd->key;
	if (sync)
		*sync = cmd->request ? true : false;
	list_add_tail(&cmd->list, &inst->cmd_q);
	vpu_process_cmd_request(inst);
	mutex_unlock(&core->cmd_lock);

	return 0;
}

static void vpu_clear_pending(struct vpu_inst *inst)
{
	if (!inst || !inst->pending)
		return;

	vpu_free_cmd(inst->pending);
	wake_up_all(&inst->core->ack_wq);
	inst->pending = NULL;
}

static bool vpu_check_response(struct vpu_cmd_t *cmd, u32 response, u32 handled)
{
	struct vpu_cmd_request *request;

	if (!cmd || !cmd->request)
		return false;

	request = cmd->request;
	if (request->response != response)
		return false;
	if (request->handled != handled)
		return false;

	return true;
}

int vpu_response_cmd(struct vpu_inst *inst, u32 response, u32 handled)
{
	struct vpu_core *core;

	if (!inst || !inst->core)
		return -EINVAL;

	core = inst->core;
	mutex_lock(&core->cmd_lock);
	if (vpu_check_response(inst->pending, response, handled))
		vpu_clear_pending(inst);

	vpu_process_cmd_request(inst);
	mutex_unlock(&core->cmd_lock);

	return 0;
}

void vpu_clear_request(struct vpu_inst *inst)
{
	struct vpu_cmd_t *cmd;
	struct vpu_cmd_t *tmp;

	mutex_lock(&inst->core->cmd_lock);
	if (inst->pending)
		vpu_clear_pending(inst);

	list_for_each_entry_safe(cmd, tmp, &inst->cmd_q, list) {
		list_del_init(&cmd->list);
		vpu_free_cmd(cmd);
	}
	mutex_unlock(&inst->core->cmd_lock);
}

static bool check_is_responsed(struct vpu_inst *inst, unsigned long key)
{
	unsigned long last_response = atomic_long_read(&inst->last_response_cmd);

	/*
	 * key is responded once last_response has caught up with it;
	 * the distance check guards against false positives while the
	 * sequence counter wraps around.
	 */
	if (key <= last_response && (last_response - key) < (ULONG_MAX >> 1))
		return true;

	return false;
}

static int sync_session_response(struct vpu_inst *inst, unsigned long key, long timeout, int try)
{
	struct vpu_core *core;

	if (!inst || !inst->core)
		return -EINVAL;

	core = inst->core;

	call_void_vop(inst, wait_prepare);
	wait_event_timeout(core->ack_wq, check_is_responsed(inst, key), timeout);
	call_void_vop(inst, wait_finish);

	if (!check_is_responsed(inst, key)) {
		if (try)
			return -EINVAL;
		dev_err(inst->dev, "[%d] sync session timeout\n", inst->id);
		set_bit(inst->id, &core->hang_mask);
		mutex_lock(&inst->core->cmd_lock);
		vpu_clear_pending(inst);
		mutex_unlock(&inst->core->cmd_lock);
		return -EINVAL;
	}

	return 0;
}

static void vpu_core_keep_active(struct vpu_core *core)
{
	struct vpu_rpc_event pkt;

	memset(&pkt, 0, sizeof(pkt));
	vpu_iface_pack_cmd(core, &pkt, 0, VPU_CMD_ID_NOOP, NULL);

	dev_dbg(core->dev, "try to wake up\n");
	mutex_lock(&core->cmd_lock);
	if (vpu_cmd_send(core, &pkt))
		dev_err(core->dev, "fail to keep active\n");
	mutex_unlock(&core->cmd_lock);
}

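/*
 * Send one session command. For synchronous commands (those listed in
 * vpu_cmd_requests) this blocks until the firmware responds or the
 * timeout expires; all other commands are merely queued.
 */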
static int vpu_session_send_cmd(struct vpu_inst *inst, u32 id, void *data)
{
	unsigned long key;
	int sync = false;
	int ret;

	if (inst->id < 0)
		return -EINVAL;

	ret = vpu_request_cmd(inst, id, data, &key, &sync);
	if (ret)
		goto exit;

	/* workaround for a firmware issue:
	 * the firmware should be woken up by a start or configure command,
	 * but there is a very small chance that it fails to wake up.
	 * In that case, try to wake the firmware again by sending a noop command.
	 */
	if (sync && (id == VPU_CMD_ID_CONFIGURE_CODEC || id == VPU_CMD_ID_START)) {
		if (sync_session_response(inst, key, VPU_TIMEOUT_WAKEUP, 1))
			vpu_core_keep_active(inst->core);
		else
			goto exit;
	}

	if (sync)
		ret = sync_session_response(inst, key, VPU_TIMEOUT, 0);

exit:
	if (ret)
		dev_err(inst->dev, "[%d] send cmd %s fail\n", inst->id, vpu_id_name(id));

	return ret;
}

int vpu_session_configure_codec(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_CONFIGURE_CODEC, NULL);
}

int vpu_session_start(struct vpu_inst *inst)
{
	vpu_trace(inst->dev, "[%d]\n", inst->id);

	return vpu_session_send_cmd(inst, VPU_CMD_ID_START, NULL);
}

int vpu_session_stop(struct vpu_inst *inst)
{
	int ret;

	vpu_trace(inst->dev, "[%d]\n", inst->id);

	ret = vpu_session_send_cmd(inst, VPU_CMD_ID_STOP, NULL);
	/* workaround for a firmware bug:
	 * if the next command follows the stop command too closely,
	 * the firmware may enter wfi wrongly.
	 */
	usleep_range(3000, 5000);
	return ret;
}

int vpu_session_encode_frame(struct vpu_inst *inst, s64 timestamp)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_FRAME_ENCODE, &timestamp);
}

int vpu_session_alloc_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_FS_ALLOC, fs);
}

int vpu_session_release_fs(struct vpu_inst *inst, struct vpu_fs_info *fs)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_FS_RELEASE, fs);
}

int vpu_session_abort(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_ABORT, NULL);
}

int vpu_session_rst_buf(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_RST_BUF, NULL);
}

int vpu_session_fill_timestamp(struct vpu_inst *inst, struct vpu_ts_info *info)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_TIMESTAMP, info);
}

int vpu_session_update_parameters(struct vpu_inst *inst, void *arg)
{
	if (inst->type & VPU_CORE_TYPE_DEC)
		vpu_iface_set_decode_params(inst, arg, 1);
	else
		vpu_iface_set_encode_params(inst, arg, 1);

	return vpu_session_send_cmd(inst, VPU_CMD_ID_UPDATE_PARAMETER, arg);
}

int vpu_session_debug(struct vpu_inst *inst)
{
	return vpu_session_send_cmd(inst, VPU_CMD_ID_DEBUG, NULL);
}

int vpu_core_snapshot(struct vpu_core *core)
{
	struct vpu_inst *inst;
	int ret;

	if (!core || list_empty(&core->instances))
		return 0;

	inst = list_first_entry(&core->instances, struct vpu_inst, list);

	reinit_completion(&core->cmp);
	ret = vpu_session_send_cmd(inst, VPU_CMD_ID_SNAPSHOT, NULL);
	if (ret)
		return ret;
	ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
	if (!ret) {
		dev_err(core->dev, "snapshot timeout\n");
		return -EINVAL;
	}

	return 0;
}

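/*
 * Like vpu_core_snapshot() above, the firmware reset completes
 * asynchronously: this function waits on core->cmp, which the message
 * handler is expected to complete when the firmware acknowledges.
 */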
int vpu_core_sw_reset(struct vpu_core *core)
{
	struct vpu_rpc_event pkt;
	int ret;

	memset(&pkt, 0, sizeof(pkt));
	vpu_iface_pack_cmd(core, &pkt, 0, VPU_CMD_ID_FIRM_RESET, NULL);

	reinit_completion(&core->cmp);
	mutex_lock(&core->cmd_lock);
	ret = vpu_cmd_send(core, &pkt);
	mutex_unlock(&core->cmd_lock);
	if (ret)
		return ret;
	ret = wait_for_completion_timeout(&core->cmp, VPU_TIMEOUT);
	if (!ret) {
		dev_err(core->dev, "sw reset timeout\n");
		return -EINVAL;
	}

	return 0;
}
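
/*
 * A minimal sketch of the expected call order from a caller's point of
 * view (hypothetical usage, not part of this file), following the
 * vpu_cmd_requests table above:
 *
 *	vpu_session_configure_codec(inst);	// sync: waits for VPU_MSG_ID_MEM_REQUEST
 *	vpu_session_start(inst);		// sync: waits for VPU_MSG_ID_START_DONE
 *	vpu_session_encode_frame(inst, ts);	// async: queued, no response expected
 *	vpu_session_stop(inst);			// sync: waits for VPU_MSG_ID_STOP_DONE
 */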