/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#ifndef __IVPU_IPC_H__
#define __IVPU_IPC_H__

#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "vpu_jsm_api.h"

struct ivpu_bo;

/* VPU FW boot notification */
#define IVPU_IPC_CHAN_BOOT_MSG		0x3ff
#define IVPU_IPC_BOOT_MSG_DATA_ADDR	0x424f4f54

/* The alignment to be used for IPC Buffers and IPC Data. */
#define IVPU_IPC_ALIGNMENT		64

#define IVPU_IPC_HDR_FREE		0
#define IVPU_IPC_HDR_ALLOCATED		1

/**
 * struct ivpu_ipc_hdr - The IPC message header structure, exchanged
 * with the VPU device firmware.
 * @data_addr: The VPU address of the payload (JSM message).
 * @data_size: The size of the payload.
 * @channel: The channel used.
 * @src_node: The Node ID of the sender.
 * @dst_node: The Node ID of the intended receiver.
 * @status: IPC buffer usage status.
 */
struct ivpu_ipc_hdr {
	u32 data_addr;
	u32 data_size;
	u16 channel;
	u8 src_node;
	u8 dst_node;
	u8 status;
} __packed __aligned(IVPU_IPC_ALIGNMENT);

typedef void (*ivpu_ipc_rx_callback_t)(struct ivpu_device *vdev,
				       struct ivpu_ipc_hdr *ipc_hdr,
				       struct vpu_jsm_msg *jsm_msg);

struct ivpu_ipc_rx_msg {
	struct list_head link;
	struct ivpu_ipc_hdr *ipc_hdr;
	struct vpu_jsm_msg *jsm_msg;
	ivpu_ipc_rx_callback_t callback;
};

struct ivpu_ipc_consumer {
	struct list_head link;
	u32 channel;
	u32 tx_vpu_addr;
	u32 request_id;
	bool aborted;
	ivpu_ipc_rx_callback_t rx_callback;

	spinlock_t rx_lock; /* Protects rx_msg_list and aborted */
	struct list_head rx_msg_list;
	wait_queue_head_t rx_msg_wq;
};

struct ivpu_ipc_info {
	struct gen_pool *mm_tx;
	struct ivpu_bo *mem_tx;
	struct ivpu_bo *mem_rx;

	atomic_t rx_msg_count;

	spinlock_t cons_lock; /* Protects cons_list and cb_msg_list */
	struct list_head cons_list;
	struct list_head cb_msg_list;

	atomic_t request_id;
	struct mutex lock; /* Protects the IPC enable/disable state (on) */
	bool on;
};

int ivpu_ipc_init(struct ivpu_device *vdev);
void ivpu_ipc_fini(struct ivpu_device *vdev);

void ivpu_ipc_enable(struct ivpu_device *vdev);
void ivpu_ipc_disable(struct ivpu_device *vdev);
void ivpu_ipc_reset(struct ivpu_device *vdev);

void ivpu_ipc_irq_handler(struct ivpu_device *vdev);
void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev);

void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
			   u32 channel, ivpu_ipc_rx_callback_t callback);
void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons);

int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		  struct vpu_jsm_msg *req);
int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		     struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *jsm_msg,
		     unsigned long timeout_ms);

int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
				 enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
				 u32 channel, unsigned long timeout_ms);
int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
			  enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
			  u32 channel, unsigned long timeout_ms);

#endif /* __IVPU_IPC_H__ */
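
/*
 * Illustrative usage sketch: a minimal blocking request/response exchange
 * built on ivpu_ipc_send_receive(), shown here only to document how the
 * declarations above fit together. The message types, the channel ID
 * (VPU_IPC_CHAN_ASYNC_CMD) and the payload field names are assumed to come
 * from vpu_jsm_api.h, and the 1000 ms timeout is a placeholder rather than
 * the driver's actual timeout value.
 *
 *	static int example_query_engine_heartbeat(struct ivpu_device *vdev,
 *						  u32 engine, u64 *heartbeat)
 *	{
 *		struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
 *		struct vpu_jsm_msg resp;
 *		int ret;
 *
 *		req.payload.query_engine_hb.engine_idx = engine;
 *
 *		ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
 *					    &resp, VPU_IPC_CHAN_ASYNC_CMD, 1000);
 *		if (ret)
 *			return ret;
 *
 *		*heartbeat = resp.payload.query_engine_hb_done.heartbeat;
 *		return 0;
 *	}
 *
 * ivpu_ipc_send_receive() allocates a per-request consumer internally; code
 * that needs to receive unsolicited messages on a channel instead registers a
 * long-lived consumer with ivpu_ipc_consumer_add() and pulls messages with
 * ivpu_ipc_receive() or via its rx_callback.
 */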