xref: /linux/drivers/media/platform/amphion/vpu_msgs.c (revision 34f7c6e7d4396090692a09789db231e12cb4762b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright 2020-2021 NXP
4  */
5 
6 #include <linux/init.h>
7 #include <linux/interconnect.h>
8 #include <linux/ioctl.h>
9 #include <linux/list.h>
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include "vpu.h"
13 #include "vpu_core.h"
14 #include "vpu_rpc.h"
15 #include "vpu_mbox.h"
16 #include "vpu_defs.h"
17 #include "vpu_cmds.h"
18 #include "vpu_msgs.h"
19 #include "vpu_v4l2.h"
20 
21 #define VPU_PKT_HEADER_LENGTH		3
22 
/*
 * Binds a virtual message id (VPU_MSG_ID_*) to the routine that consumes
 * it; the handlers[] table of these drives per-session dispatch in
 * vpu_session_handle_msg().
 */
struct vpu_msg_handler {
	u32 id;		/* virtual message id (VPU_MSG_ID_*) */
	/* consumer; invoked with the instance and the raw firmware packet */
	void (*done)(struct vpu_inst *inst, struct vpu_rpc_event *pkt);
};
27 
/* START_DONE: firmware acknowledged session start; trace only, no state change. */
static void vpu_session_handle_start_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	vpu_trace(inst->dev, "[%d]\n", inst->id);
}
32 
33 static void vpu_session_handle_mem_request(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
34 {
35 	struct vpu_pkt_mem_req_data req_data;
36 
37 	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&req_data);
38 	vpu_trace(inst->dev, "[%d] %d:%d %d:%d %d:%d\n",
39 		  inst->id,
40 		  req_data.enc_frame_size,
41 		  req_data.enc_frame_num,
42 		  req_data.ref_frame_size,
43 		  req_data.ref_frame_num,
44 		  req_data.act_buf_size,
45 		  req_data.act_buf_num);
46 	call_void_vop(inst, mem_request,
47 		      req_data.enc_frame_size,
48 		      req_data.enc_frame_num,
49 		      req_data.ref_frame_size,
50 		      req_data.ref_frame_num,
51 		      req_data.act_buf_size,
52 		      req_data.act_buf_num);
53 }
54 
/* STOP_DONE: firmware acknowledged session stop; notify the instance ops. */
static void vpu_session_handle_stop_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	vpu_trace(inst->dev, "[%d]\n", inst->id);

	call_void_vop(inst, stop_done);
}
61 
62 static void vpu_session_handle_seq_hdr(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
63 {
64 	struct vpu_dec_codec_info info;
65 	const struct vpu_core_resources *res;
66 
67 	memset(&info, 0, sizeof(info));
68 	res = vpu_get_resource(inst);
69 	info.stride = res ? res->stride : 1;
70 	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
71 	call_void_vop(inst, event_notify, VPU_MSG_ID_SEQ_HDR_FOUND, &info);
72 }
73 
/* RES_CHANGE: stream resolution changed; notify the instance (no payload). */
static void vpu_session_handle_resolution_change(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	call_void_vop(inst, event_notify, VPU_MSG_ID_RES_CHANGE, NULL);
}
78 
79 static void vpu_session_handle_enc_frame_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
80 {
81 	struct vpu_enc_pic_info info;
82 
83 	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
84 	dev_dbg(inst->dev, "[%d] frame id = %d, wptr = 0x%x, size = %d\n",
85 		inst->id, info.frame_id, info.wptr, info.frame_size);
86 	call_void_vop(inst, get_one_frame, &info);
87 }
88 
89 static void vpu_session_handle_frame_request(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
90 {
91 	struct vpu_fs_info fs;
92 
93 	vpu_iface_unpack_msg_data(inst->core, pkt, &fs);
94 	call_void_vop(inst, event_notify, VPU_MSG_ID_FRAME_REQ, &fs);
95 }
96 
97 static void vpu_session_handle_frame_release(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
98 {
99 	if (inst->core->type == VPU_CORE_TYPE_ENC) {
100 		struct vpu_frame_info info;
101 
102 		memset(&info, 0, sizeof(info));
103 		vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info.sequence);
104 		dev_dbg(inst->dev, "[%d] %d\n", inst->id, info.sequence);
105 		info.type = inst->out_format.type;
106 		call_void_vop(inst, buf_done, &info);
107 	} else if (inst->core->type == VPU_CORE_TYPE_DEC) {
108 		struct vpu_fs_info fs;
109 
110 		vpu_iface_unpack_msg_data(inst->core, pkt, &fs);
111 		call_void_vop(inst, event_notify, VPU_MSG_ID_FRAME_RELEASE, &fs);
112 	}
113 }
114 
/* FRAME_INPUT_DONE: firmware consumed an input buffer; notify the instance. */
static void vpu_session_handle_input_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	dev_dbg(inst->dev, "[%d]\n", inst->id);
	call_void_vop(inst, input_done);
}
120 
121 static void vpu_session_handle_pic_decoded(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
122 {
123 	struct vpu_dec_pic_info info;
124 
125 	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
126 	call_void_vop(inst, get_one_frame, &info);
127 }
128 
129 static void vpu_session_handle_pic_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
130 {
131 	struct vpu_dec_pic_info info;
132 	struct vpu_frame_info frame;
133 
134 	memset(&frame, 0, sizeof(frame));
135 	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
136 	if (inst->core->type == VPU_CORE_TYPE_DEC)
137 		frame.type = inst->cap_format.type;
138 	frame.id = info.id;
139 	frame.luma = info.luma;
140 	frame.skipped = info.skipped;
141 	frame.timestamp = info.timestamp;
142 
143 	call_void_vop(inst, buf_done, &frame);
144 }
145 
/* PIC_EOS: firmware reached end of stream; notify the instance (no payload). */
static void vpu_session_handle_eos(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	call_void_vop(inst, event_notify, VPU_MSG_ID_PIC_EOS, NULL);
}
150 
/*
 * UNSUPPORTED: firmware cannot handle the stream; notify the instance and
 * put the V4L2 side into the error state.
 */
static void vpu_session_handle_error(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	dev_err(inst->dev, "unsupported stream\n");
	call_void_vop(inst, event_notify, VPU_MSG_ID_UNSUPPORTED, NULL);
	vpu_v4l2_set_error(inst);
}
157 
158 static void vpu_session_handle_firmware_xcpt(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
159 {
160 	char *str = (char *)pkt->data;
161 
162 	dev_err(inst->dev, "%s firmware xcpt: %s\n",
163 		vpu_core_type_desc(inst->core->type), str);
164 	call_void_vop(inst, event_notify, VPU_MSG_ID_FIRMWARE_XCPT, NULL);
165 	set_bit(inst->id, &inst->core->hang_mask);
166 	vpu_v4l2_set_error(inst);
167 }
168 
169 static struct vpu_msg_handler handlers[] = {
170 	{VPU_MSG_ID_START_DONE, vpu_session_handle_start_done},
171 	{VPU_MSG_ID_STOP_DONE, vpu_session_handle_stop_done},
172 	{VPU_MSG_ID_MEM_REQUEST, vpu_session_handle_mem_request},
173 	{VPU_MSG_ID_SEQ_HDR_FOUND, vpu_session_handle_seq_hdr},
174 	{VPU_MSG_ID_RES_CHANGE, vpu_session_handle_resolution_change},
175 	{VPU_MSG_ID_FRAME_INPUT_DONE, vpu_session_handle_input_done},
176 	{VPU_MSG_ID_FRAME_REQ, vpu_session_handle_frame_request},
177 	{VPU_MSG_ID_FRAME_RELEASE, vpu_session_handle_frame_release},
178 	{VPU_MSG_ID_ENC_DONE, vpu_session_handle_enc_frame_done},
179 	{VPU_MSG_ID_PIC_DECODED, vpu_session_handle_pic_decoded},
180 	{VPU_MSG_ID_DEC_DONE, vpu_session_handle_pic_done},
181 	{VPU_MSG_ID_PIC_EOS, vpu_session_handle_eos},
182 	{VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error},
183 	{VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt},
184 };
185 
186 static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *msg)
187 {
188 	int ret;
189 	u32 msg_id;
190 	struct vpu_msg_handler *handler = NULL;
191 	unsigned int i;
192 
193 	ret = vpu_iface_convert_msg_id(inst->core, msg->hdr.id);
194 	if (ret < 0)
195 		return -EINVAL;
196 
197 	msg_id = ret;
198 	dev_dbg(inst->dev, "[%d] receive event(0x%x)\n", inst->id, msg_id);
199 
200 	for (i = 0; i < ARRAY_SIZE(handlers); i++) {
201 		if (handlers[i].id == msg_id) {
202 			handler = &handlers[i];
203 			break;
204 		}
205 	}
206 
207 	if (handler && handler->done)
208 		handler->done(inst, msg);
209 
210 	vpu_response_cmd(inst, msg_id, 1);
211 
212 	return 0;
213 }
214 
215 static bool vpu_inst_receive_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
216 {
217 	unsigned long bytes = sizeof(struct vpu_rpc_event_header);
218 	u32 ret;
219 
220 	memset(pkt, 0, sizeof(*pkt));
221 	if (kfifo_len(&inst->msg_fifo) < bytes)
222 		return false;
223 
224 	ret = kfifo_out(&inst->msg_fifo, pkt, bytes);
225 	if (ret != bytes)
226 		return false;
227 
228 	if (pkt->hdr.num > 0) {
229 		bytes = pkt->hdr.num * sizeof(u32);
230 		ret = kfifo_out(&inst->msg_fifo, pkt->data, bytes);
231 		if (ret != bytes)
232 			return false;
233 	}
234 
235 	return true;
236 }
237 
238 void vpu_inst_run_work(struct work_struct *work)
239 {
240 	struct vpu_inst *inst = container_of(work, struct vpu_inst, msg_work);
241 	struct vpu_rpc_event pkt;
242 
243 	while (vpu_inst_receive_msg(inst, &pkt))
244 		vpu_session_handle_msg(inst, &pkt);
245 }
246 
247 static void vpu_inst_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
248 {
249 	unsigned long bytes;
250 	u32 id = pkt->hdr.id;
251 	int ret;
252 
253 	if (!inst->workqueue)
254 		return;
255 
256 	bytes = sizeof(pkt->hdr) + pkt->hdr.num * sizeof(u32);
257 	ret = kfifo_in(&inst->msg_fifo, pkt, bytes);
258 	if (ret != bytes)
259 		dev_err(inst->dev, "[%d:%d]overflow: %d\n", inst->core->id, inst->id, id);
260 	queue_work(inst->workqueue, &inst->msg_work);
261 }
262 
/*
 * Drain all pending firmware messages from the core's RPC interface and
 * route each to its target instance (looked up by pkt.hdr.index).
 * Messages whose id cannot be converted, or whose instance is gone, are
 * dropped. Always returns 0.
 */
static int vpu_handle_msg(struct vpu_core *core)
{
	struct vpu_rpc_event pkt;
	struct vpu_inst *inst;
	int ret;

	memset(&pkt, 0, sizeof(pkt));
	while (!vpu_iface_receive_msg(core, &pkt)) {
		dev_dbg(core->dev, "event index = %d, id = %d, num = %d\n",
			pkt.hdr.index, pkt.hdr.id, pkt.hdr.num);

		/* ret is the converted virtual msg id on success */
		ret = vpu_iface_convert_msg_id(core, pkt.hdr.id);
		if (ret < 0)
			continue;

		/* takes a reference; released via vpu_inst_put() below */
		inst = vpu_core_find_instance(core, pkt.hdr.index);
		if (inst) {
			vpu_response_cmd(inst, ret, 0);
			/* flow recording is serialized with command submission */
			mutex_lock(&core->cmd_lock);
			vpu_inst_record_flow(inst, ret);
			mutex_unlock(&core->cmd_lock);

			vpu_inst_handle_msg(inst, &pkt);
			vpu_inst_put(inst);
		}
		memset(&pkt, 0, sizeof(pkt));
	}

	return 0;
}
293 
/*
 * Threaded part of interrupt handling, run from the core's message work.
 * SYNC triggers the boot handshake (the three mbox messages below must be
 * sent in this order); BOOT_DONE/SNAPSHOT_DONE were already completed in
 * the hard ISR; anything else means firmware messages are pending.
 * Always returns 0.
 */
static int vpu_isr_thread(struct vpu_core *core, u32 irq_code)
{
	dev_dbg(core->dev, "irq code = 0x%x\n", irq_code);
	switch (irq_code) {
	case VPU_IRQ_CODE_SYNC:
		/* tell firmware where the RPC buffer lives, relative to fw base */
		vpu_mbox_send_msg(core, PRC_BUF_OFFSET, core->rpc.phys - core->fw.phys);
		vpu_mbox_send_msg(core, BOOT_ADDRESS, core->fw.phys);
		vpu_mbox_send_msg(core, INIT_DONE, 2);
		break;
	case VPU_IRQ_CODE_BOOT_DONE:
		break;
	case VPU_IRQ_CODE_SNAPSHOT_DONE:
		break;
	default:
		vpu_handle_msg(core);
		break;
	}

	return 0;
}
314 
315 static void vpu_core_run_msg_work(struct vpu_core *core)
316 {
317 	const unsigned int SIZE = sizeof(u32);
318 
319 	while (kfifo_len(&core->msg_fifo) >= SIZE) {
320 		u32 data = 0;
321 
322 		if (kfifo_out(&core->msg_fifo, &data, SIZE) == SIZE)
323 			vpu_isr_thread(core, data);
324 	}
325 }
326 
/*
 * Core-level work function: process queued irq codes now, then schedule
 * the delayed work as a safety net to catch anything that arrives late.
 */
void vpu_msg_run_work(struct work_struct *work)
{
	struct vpu_core *core = container_of(work, struct vpu_core, msg_work);
	unsigned long delay = msecs_to_jiffies(10);

	vpu_core_run_msg_work(core);
	queue_delayed_work(core->workqueue, &core->msg_delayed_work, delay);
}
335 
336 void vpu_msg_delayed_work(struct work_struct *work)
337 {
338 	struct vpu_core *core;
339 	struct delayed_work *dwork;
340 	unsigned long bytes = sizeof(u32);
341 	u32 i;
342 
343 	if (!work)
344 		return;
345 
346 	dwork = to_delayed_work(work);
347 	core = container_of(dwork, struct vpu_core, msg_delayed_work);
348 	if (kfifo_len(&core->msg_fifo) >= bytes)
349 		vpu_core_run_msg_work(core);
350 
351 	bytes = sizeof(struct vpu_rpc_event_header);
352 	for (i = 0; i < core->supported_instance_count; i++) {
353 		struct vpu_inst *inst = vpu_core_find_instance(core, i);
354 
355 		if (!inst)
356 			continue;
357 
358 		if (inst->workqueue && kfifo_len(&inst->msg_fifo) >= bytes)
359 			queue_work(inst->workqueue, &inst->msg_work);
360 
361 		vpu_inst_put(inst);
362 	}
363 }
364 
365 int vpu_isr(struct vpu_core *core, u32 irq)
366 {
367 	switch (irq) {
368 	case VPU_IRQ_CODE_SYNC:
369 		break;
370 	case VPU_IRQ_CODE_BOOT_DONE:
371 		complete(&core->cmp);
372 		break;
373 	case VPU_IRQ_CODE_SNAPSHOT_DONE:
374 		complete(&core->cmp);
375 		break;
376 	default:
377 		break;
378 	}
379 
380 	if (kfifo_in(&core->msg_fifo, &irq, sizeof(irq)) != sizeof(irq))
381 		dev_err(core->dev, "[%d]overflow: %d\n", core->id, irq);
382 	queue_work(core->workqueue, &core->msg_work);
383 
384 	return 0;
385 }
386