Lines Matching full:vdev

26 static void ivpu_ipc_msg_dump(struct ivpu_device *vdev, char *c,  in ivpu_ipc_msg_dump()  argument
29 ivpu_dbg(vdev, IPC, in ivpu_ipc_msg_dump()
35 static void ivpu_jsm_msg_dump(struct ivpu_device *vdev, char *c, in ivpu_jsm_msg_dump() argument
40 ivpu_dbg(vdev, JSM, in ivpu_jsm_msg_dump()
48 ivpu_ipc_rx_mark_free(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr, in ivpu_ipc_rx_mark_free() argument
57 static void ivpu_ipc_mem_fini(struct ivpu_device *vdev) in ivpu_ipc_mem_fini() argument
59 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_mem_fini()
66 ivpu_ipc_tx_prepare(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, in ivpu_ipc_tx_prepare() argument
69 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_tx_prepare()
76 ivpu_err_ratelimited(vdev, "Failed to reserve IPC buffer, size %ld\n", in ivpu_ipc_tx_prepare()
82 if (drm_WARN_ON(&vdev->drm, !tx_buf)) { in ivpu_ipc_tx_prepare()
90 ivpu_warn_ratelimited(vdev, "IPC message vpu:0x%x not released by firmware\n", in ivpu_ipc_tx_prepare()
94 ivpu_warn_ratelimited(vdev, "JSM message vpu:0x%x not released by firmware\n", in ivpu_ipc_tx_prepare()
116 ivpu_jsm_msg_dump(vdev, "TX", &tx_buf->jsm, jsm_vpu_addr); in ivpu_ipc_tx_prepare()
117 ivpu_ipc_msg_dump(vdev, "TX", &tx_buf->ipc, tx_buf_vpu_addr); in ivpu_ipc_tx_prepare()
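
The "Failed to reserve IPC buffer" and drm_WARN_ON(!tx_buf) matches above suggest the TX path carves fixed-size message slots out of a pre-mapped buffer with genalloc. A minimal sketch of that reservation pattern follows; it assumes the pool is seeded with the device-visible base of the TX buffer object (see the ivpu_ipc_init() matches further down), and the helper name and parameters are illustrative, not the driver's API.

#include <linux/genalloc.h>
#include <linux/types.h>

/*
 * Illustrative only: reserve one IPC TX slot from a genalloc pool that was
 * seeded with the device-visible (VPU) base address of the TX buffer, so
 * gen_pool_alloc() hands back a VPU address directly and the CPU pointer is
 * recovered from the slot's offset into the same buffer.
 */
static void *ipc_tx_slot_reserve(struct gen_pool *mm_tx, void *bo_cpu_base,
				 u32 bo_vpu_base, size_t slot_size, u32 *vpu_addr)
{
	unsigned long addr = gen_pool_alloc(mm_tx, slot_size);

	if (!addr)
		return NULL;	/* pool exhausted: "Failed to reserve IPC buffer" */

	*vpu_addr = (u32)addr;	/* VPU addresses are 32-bit in this interface */
	return bo_cpu_base + (addr - bo_vpu_base);
}
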
122 static void ivpu_ipc_tx_release(struct ivpu_device *vdev, u32 vpu_addr) in ivpu_ipc_tx_release() argument
124 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_tx_release()
130 static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr) in ivpu_ipc_tx() argument
132 ivpu_hw_ipc_tx_set(vdev, vpu_addr); in ivpu_ipc_tx()
136 ivpu_ipc_rx_msg_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, in ivpu_ipc_rx_msg_add() argument
139 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_rx_msg_add()
147 ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg); in ivpu_ipc_rx_msg_add()
168 ivpu_ipc_rx_msg_del(struct ivpu_device *vdev, struct ivpu_ipc_rx_msg *rx_msg) in ivpu_ipc_rx_msg_del() argument
171 ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg); in ivpu_ipc_rx_msg_del()
172 atomic_dec(&vdev->ipc->rx_msg_count); in ivpu_ipc_rx_msg_del()
176 void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, in ivpu_ipc_consumer_add() argument
179 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_consumer_add()
196 void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons) in ivpu_ipc_consumer_del() argument
198 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_consumer_del()
207 ivpu_ipc_rx_msg_del(vdev, rx_msg); in ivpu_ipc_consumer_del()
210 ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr); in ivpu_ipc_consumer_del()
213 int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req) in ivpu_ipc_send() argument
215 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_send()
225 ret = ivpu_ipc_tx_prepare(vdev, cons, req); in ivpu_ipc_send()
229 ivpu_ipc_tx(vdev, cons->tx_vpu_addr); in ivpu_ipc_send()
247 int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, in ivpu_ipc_receive() argument
254 if (drm_WARN_ONCE(&vdev->drm, cons->rx_callback, "Consumer works only in async mode\n")) in ivpu_ipc_receive()
281 ivpu_dbg(vdev, IPC, "IPC resp result error: %d\n", rx_msg->jsm_msg->result); in ivpu_ipc_receive()
289 ivpu_ipc_rx_msg_del(vdev, rx_msg); in ivpu_ipc_receive()
295 ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req, in ivpu_ipc_send_receive_internal() argument
303 ivpu_ipc_consumer_add(vdev, &cons, channel, NULL); in ivpu_ipc_send_receive_internal()
305 ret = ivpu_ipc_send(vdev, &cons, req); in ivpu_ipc_send_receive_internal()
307 ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret); in ivpu_ipc_send_receive_internal()
311 ret = ivpu_ipc_receive(vdev, &cons, NULL, resp, timeout_ms); in ivpu_ipc_send_receive_internal()
313 ivpu_warn_ratelimited(vdev, "IPC receive failed: type %s, ret %d\n", in ivpu_ipc_send_receive_internal()
319 ivpu_warn_ratelimited(vdev, "Invalid JSM response type: 0x%x\n", resp->type); in ivpu_ipc_send_receive_internal()
324 ivpu_ipc_consumer_del(vdev, &cons); in ivpu_ipc_send_receive_internal()
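
Read together, the matches above outline the synchronous request/response flow: register a one-shot consumer on a channel, push the request, block for the reply, sanity-check the reply type, then unregister. A condensed sketch using only the calls visible above; it assumes the driver's ivpu_ipc.h types are in scope, the expected_resp type is simplified to u32, and error handling is trimmed.

static int ipc_call_sync_sketch(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
				u32 expected_resp, struct vpu_jsm_msg *resp,
				u32 channel, unsigned long timeout_ms)
{
	struct ivpu_ipc_consumer cons;
	int ret;

	ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);	/* NULL: no async rx_callback */

	ret = ivpu_ipc_send(vdev, &cons, req);
	if (!ret)
		ret = ivpu_ipc_receive(vdev, &cons, NULL, resp, timeout_ms);

	if (!ret && resp->type != expected_resp)
		ret = -EBADE;	/* "Invalid JSM response type" */

	ivpu_ipc_consumer_del(vdev, &cons);
	return ret;
}
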
328 int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req, in ivpu_ipc_send_receive_active() argument
336 drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev)); in ivpu_ipc_send_receive_active()
338 ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp, resp, channel, timeout_ms); in ivpu_ipc_send_receive_active()
342 hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, in ivpu_ipc_send_receive_active()
344 vdev->timeout.jsm); in ivpu_ipc_send_receive_active()
346 ivpu_pm_trigger_recovery(vdev, "IPC timeout"); in ivpu_ipc_send_receive_active()
351 int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req, in ivpu_ipc_send_receive() argument
357 ret = ivpu_rpm_get(vdev); in ivpu_ipc_send_receive()
361 ret = ivpu_ipc_send_receive_active(vdev, req, expected_resp, resp, channel, timeout_ms); in ivpu_ipc_send_receive()
363 ivpu_rpm_put(vdev); in ivpu_ipc_send_receive()
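
The three matches above give away the shape of the top-level wrapper: it only adds runtime-PM bracketing around the "_active" variant. A plausible reconstruction of the body (the early return on a failed ivpu_rpm_get() is an assumption):

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;	/* assumed: bail out if the NPU cannot be powered up */

	ret = ivpu_ipc_send_receive_active(vdev, req, expected_resp, resp,
					   channel, timeout_ms);

	ivpu_rpm_put(vdev);
	return ret;
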
368 ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, in ivpu_ipc_match_consumer() argument
380 void ivpu_ipc_irq_handler(struct ivpu_device *vdev) in ivpu_ipc_irq_handler() argument
382 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_irq_handler()
394 while (ivpu_hw_ipc_rx_count_get(vdev)) { in ivpu_ipc_irq_handler()
395 vpu_addr = ivpu_hw_ipc_rx_addr_get(vdev); in ivpu_ipc_irq_handler()
397 ivpu_err_ratelimited(vdev, "Failed to read IPC rx addr register\n"); in ivpu_ipc_irq_handler()
403 ivpu_warn_ratelimited(vdev, "IPC msg 0x%x out of range\n", vpu_addr); in ivpu_ipc_irq_handler()
406 ivpu_ipc_msg_dump(vdev, "RX", ipc_hdr, vpu_addr); in ivpu_ipc_irq_handler()
412 ivpu_warn_ratelimited(vdev, "JSM msg 0x%x out of range\n", in ivpu_ipc_irq_handler()
414 ivpu_ipc_rx_mark_free(vdev, ipc_hdr, NULL); in ivpu_ipc_irq_handler()
417 ivpu_jsm_msg_dump(vdev, "RX", jsm_msg, ipc_hdr->data_addr); in ivpu_ipc_irq_handler()
421 ivpu_warn_ratelimited(vdev, "IPC RX msg dropped, msg count %d\n", in ivpu_ipc_irq_handler()
423 ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg); in ivpu_ipc_irq_handler()
430 if (ivpu_ipc_match_consumer(vdev, cons, ipc_hdr, jsm_msg)) { in ivpu_ipc_irq_handler()
431 ivpu_ipc_rx_msg_add(vdev, cons, ipc_hdr, jsm_msg); in ivpu_ipc_irq_handler()
439 ivpu_dbg(vdev, IPC, "IPC RX msg 0x%x dropped (no consumer)\n", vpu_addr); in ivpu_ipc_irq_handler()
440 ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg); in ivpu_ipc_irq_handler()
445 if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_IPC)) in ivpu_ipc_irq_handler()
446 ivpu_err_ratelimited(vdev, "IRQ FIFO full\n"); in ivpu_ipc_irq_handler()
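
The hard-IRQ half drains every pending RX slot from the hardware FIFO, validates the addresses, and hands each message to the first consumer that matches its channel; unmatched or out-of-range messages are returned to the firmware via ivpu_ipc_rx_mark_free(). A condensed sketch of that loop, built only from the calls visible above; locking, address translation, local declarations, and the list member name are elided or assumed.

	while (ivpu_hw_ipc_rx_count_get(vdev)) {
		bool dispatched = false;

		vpu_addr = ivpu_hw_ipc_rx_addr_get(vdev);
		/*
		 * ...translate vpu_addr into ipc_hdr / jsm_msg CPU pointers,
		 * range-check both, mark the slot free and continue on error...
		 */

		list_for_each_entry(cons, &ipc->cons_list, link) {	/* 'link' is assumed */
			if (ivpu_ipc_match_consumer(vdev, cons, ipc_hdr, jsm_msg)) {
				ivpu_ipc_rx_msg_add(vdev, cons, ipc_hdr, jsm_msg);
				dispatched = true;
				break;
			}
		}

		if (!dispatched)
			ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);	/* dropped, no consumer */
	}

	if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_IPC))	/* wake the threaded handler */
		ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
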
449 void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev) in ivpu_ipc_irq_thread_handler() argument
451 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_irq_thread_handler()
462 rx_msg->callback(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg); in ivpu_ipc_irq_thread_handler()
463 ivpu_ipc_rx_msg_del(vdev, rx_msg); in ivpu_ipc_irq_thread_handler()
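
The threaded half then runs asynchronous consumers' callbacks outside hard-IRQ context and releases each message. A sketch of that drain, assuming pending callback messages are first spliced onto a private list under an IPC lock; the lock name, list member name, and local declarations are assumptions.

	LIST_HEAD(cb_msg_list);

	/* assumed locking: detach pending callback messages atomically */
	spin_lock_irq(&ipc->cons_lock);
	list_splice_tail_init(&ipc->cb_msg_list, &cb_msg_list);
	spin_unlock_irq(&ipc->cons_lock);

	list_for_each_entry_safe(rx_msg, r, &cb_msg_list, link) {
		rx_msg->callback(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
		ivpu_ipc_rx_msg_del(vdev, rx_msg);	/* returns the slot to the firmware */
	}
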
467 int ivpu_ipc_init(struct ivpu_device *vdev) in ivpu_ipc_init() argument
469 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_init()
472 ipc->mem_tx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); in ivpu_ipc_init()
474 ivpu_err(vdev, "Failed to allocate mem_tx\n"); in ivpu_ipc_init()
478 ipc->mem_rx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); in ivpu_ipc_init()
480 ivpu_err(vdev, "Failed to allocate mem_rx\n"); in ivpu_ipc_init()
485 ipc->mm_tx = devm_gen_pool_create(vdev->drm.dev, __ffs(IVPU_IPC_ALIGNMENT), in ivpu_ipc_init()
489 ivpu_err(vdev, "Failed to create gen pool, %pe\n", ipc->mm_tx); in ivpu_ipc_init()
495 ivpu_err(vdev, "gen_pool_add failed, ret %d\n", ret); in ivpu_ipc_init()
502 ret = drmm_mutex_init(&vdev->drm, &ipc->lock); in ivpu_ipc_init()
504 ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret); in ivpu_ipc_init()
507 ivpu_ipc_reset(vdev); in ivpu_ipc_init()
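
The init matches show the standard devm genalloc recipe: create a device-managed pool whose minimum allocation order matches IVPU_IPC_ALIGNMENT, then seed it with one contiguous region backed by the TX buffer object. A minimal sketch of that recipe; the pool name and the base/size arguments are placeholders, not the driver's values.

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/genalloc.h>

static struct gen_pool *ipc_tx_pool_create(struct device *dev,
					   unsigned long base, size_t size)
{
	struct gen_pool *pool;
	int ret;

	pool = devm_gen_pool_create(dev, __ffs(IVPU_IPC_ALIGNMENT), -1, "tx_ipc");
	if (IS_ERR(pool))
		return pool;	/* "Failed to create gen pool" */

	ret = gen_pool_add(pool, base, size, -1);	/* -1: any NUMA node */
	if (ret)
		return ERR_PTR(ret);	/* "gen_pool_add failed" */

	return pool;
}
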
517 void ivpu_ipc_fini(struct ivpu_device *vdev) in ivpu_ipc_fini() argument
519 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_fini()
521 drm_WARN_ON(&vdev->drm, ipc->on); in ivpu_ipc_fini()
522 drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cons_list)); in ivpu_ipc_fini()
523 drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list)); in ivpu_ipc_fini()
524 drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0); in ivpu_ipc_fini()
526 ivpu_ipc_mem_fini(vdev); in ivpu_ipc_fini()
529 void ivpu_ipc_enable(struct ivpu_device *vdev) in ivpu_ipc_enable() argument
531 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_enable()
538 void ivpu_ipc_disable(struct ivpu_device *vdev) in ivpu_ipc_disable() argument
540 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_disable()
544 drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list)); in ivpu_ipc_disable()
556 ivpu_ipc_rx_msg_del(vdev, rx_msg); in ivpu_ipc_disable()
562 drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0); in ivpu_ipc_disable()
565 void ivpu_ipc_reset(struct ivpu_device *vdev) in ivpu_ipc_reset() argument
567 struct ivpu_ipc_info *ipc = vdev->ipc; in ivpu_ipc_reset()
570 drm_WARN_ON(&vdev->drm, ipc->on); in ivpu_ipc_reset()