// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include "vpu.h"
#include "vpu_core.h"
#include "vpu_rpc.h"
#include "vpu_mbox.h"
#include "vpu_defs.h"
#include "vpu_cmds.h"
#include "vpu_msgs.h"
#include "vpu_v4l2.h"

#define VPU_PKT_HEADER_LENGTH 3

struct vpu_msg_handler {
	u32 id;
	void (*done)(struct vpu_inst *inst, struct vpu_rpc_event *pkt);
	u32 is_str;
};

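/*
 * Session message handlers: they unpack the event payload (when the
 * message carries one) and forward it to the instance via call_void_vop().
 */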
static void vpu_session_handle_start_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	vpu_trace(inst->dev, "[%d]\n", inst->id);
}

static void vpu_session_handle_mem_request(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	struct vpu_pkt_mem_req_data req_data = { 0 };

	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&req_data);
	vpu_trace(inst->dev, "[%d] %d:%d %d:%d %d:%d\n",
		  inst->id,
		  req_data.enc_frame_size,
		  req_data.enc_frame_num,
		  req_data.ref_frame_size,
		  req_data.ref_frame_num,
		  req_data.act_buf_size,
		  req_data.act_buf_num);
	vpu_inst_lock(inst);
	call_void_vop(inst, mem_request,
		      req_data.enc_frame_size,
		      req_data.enc_frame_num,
		      req_data.ref_frame_size,
		      req_data.ref_frame_num,
		      req_data.act_buf_size,
		      req_data.act_buf_num);
	vpu_inst_unlock(inst);
}

static void vpu_session_handle_stop_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	vpu_trace(inst->dev, "[%d]\n", inst->id);

	call_void_vop(inst, stop_done);
}

static void vpu_session_handle_seq_hdr(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	struct vpu_dec_codec_info info;
	const struct vpu_core_resources *res;

	memset(&info, 0, sizeof(info));
	res = vpu_get_resource(inst);
	info.stride = res ? res->stride : 1;
	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
	call_void_vop(inst, event_notify, VPU_MSG_ID_SEQ_HDR_FOUND, &info);
}

static void vpu_session_handle_resolution_change(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	call_void_vop(inst, event_notify, VPU_MSG_ID_RES_CHANGE, NULL);
}

static void vpu_session_handle_enc_frame_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	struct vpu_enc_pic_info info = { 0 };

	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
	dev_dbg(inst->dev, "[%d] frame id = %d, wptr = 0x%x, size = %d\n",
		inst->id, info.frame_id, info.wptr, info.frame_size);
	call_void_vop(inst, get_one_frame, &info);
}

static void vpu_session_handle_frame_request(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	struct vpu_fs_info fs = { 0 };

	vpu_iface_unpack_msg_data(inst->core, pkt, &fs);
	call_void_vop(inst, event_notify, VPU_MSG_ID_FRAME_REQ, &fs);
}

static void vpu_session_handle_frame_release(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	if (inst->core->type == VPU_CORE_TYPE_ENC) {
		struct vpu_frame_info info;

		memset(&info, 0, sizeof(info));
		vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info.sequence);
		dev_dbg(inst->dev, "[%d] %d\n", inst->id, info.sequence);
		info.type = inst->out_format.type;
		call_void_vop(inst, buf_done, &info);
	} else if (inst->core->type == VPU_CORE_TYPE_DEC) {
		struct vpu_fs_info fs = { 0 };

		vpu_iface_unpack_msg_data(inst->core, pkt, &fs);
		call_void_vop(inst, event_notify, VPU_MSG_ID_FRAME_RELEASE, &fs);
	}
}

static void vpu_session_handle_input_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	dev_dbg(inst->dev, "[%d]\n", inst->id);
	call_void_vop(inst, input_done);
}

static void vpu_session_handle_pic_decoded(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	struct vpu_dec_pic_info info = { 0 };

	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
	call_void_vop(inst, get_one_frame, &info);
}

static void vpu_session_handle_pic_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	struct vpu_dec_pic_info info = { 0 };
	struct vpu_frame_info frame;

	memset(&frame, 0, sizeof(frame));
	vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
	if (inst->core->type == VPU_CORE_TYPE_DEC)
		frame.type = inst->cap_format.type;
	frame.id = info.id;
	frame.luma = info.luma;
	frame.skipped = info.skipped;
	frame.timestamp = info.timestamp;

	call_void_vop(inst, buf_done, &frame);
}

static void vpu_session_handle_eos(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	call_void_vop(inst, event_notify, VPU_MSG_ID_PIC_EOS, NULL);
}

static void vpu_session_handle_error(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	char *str = (char *)pkt->data;

	if (*str)
		dev_err(inst->dev, "instance %d firmware error : %s\n", inst->id, str);
	else
		dev_err(inst->dev, "instance %d is unsupported stream\n", inst->id);
	call_void_vop(inst, event_notify, VPU_MSG_ID_UNSUPPORTED, NULL);
	vpu_v4l2_set_error(inst);
}

static void vpu_session_handle_firmware_xcpt(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	char *str = (char *)pkt->data;

	dev_err(inst->dev, "%s firmware xcpt: %s\n",
		vpu_core_type_desc(inst->core->type), str);
	call_void_vop(inst, event_notify, VPU_MSG_ID_FIRMWARE_XCPT, NULL);
	set_bit(inst->id, &inst->core->hang_mask);
	vpu_v4l2_set_error(inst);
}

static void vpu_session_handle_pic_skipped(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	vpu_inst_lock(inst);
	vpu_skip_frame(inst, 1);
	vpu_inst_unlock(inst);
}

static void vpu_session_handle_dbg_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	char *str = (char *)pkt->data;

	if (*str)
		dev_info(inst->dev, "instance %d firmware dbg msg : %s\n", inst->id, str);
}

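/*
 * Make sure a string payload is NUL terminated before a handler treats
 * pkt->data as a C string; drop the last word if the payload already
 * fills the buffer.
 */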
static void vpu_terminate_string_msg(struct vpu_rpc_event *pkt)
{
	if (pkt->hdr.num == ARRAY_SIZE(pkt->data))
		pkt->hdr.num--;
	pkt->data[pkt->hdr.num] = 0;
}

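/*
 * Messages flagged with is_str carry a string payload and go through
 * vpu_terminate_string_msg() before dispatch.
 */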
static struct vpu_msg_handler handlers[] = {
	{VPU_MSG_ID_START_DONE, vpu_session_handle_start_done},
	{VPU_MSG_ID_STOP_DONE, vpu_session_handle_stop_done},
	{VPU_MSG_ID_MEM_REQUEST, vpu_session_handle_mem_request},
	{VPU_MSG_ID_SEQ_HDR_FOUND, vpu_session_handle_seq_hdr},
	{VPU_MSG_ID_RES_CHANGE, vpu_session_handle_resolution_change},
	{VPU_MSG_ID_FRAME_INPUT_DONE, vpu_session_handle_input_done},
	{VPU_MSG_ID_FRAME_REQ, vpu_session_handle_frame_request},
	{VPU_MSG_ID_FRAME_RELEASE, vpu_session_handle_frame_release},
	{VPU_MSG_ID_ENC_DONE, vpu_session_handle_enc_frame_done},
	{VPU_MSG_ID_PIC_DECODED, vpu_session_handle_pic_decoded},
	{VPU_MSG_ID_DEC_DONE, vpu_session_handle_pic_done},
	{VPU_MSG_ID_PIC_EOS, vpu_session_handle_eos},
	{VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error, true},
	{VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt, true},
	{VPU_MSG_ID_PIC_SKIPPED, vpu_session_handle_pic_skipped},
	{VPU_MSG_ID_DBG_MSG, vpu_session_handle_dbg_msg, true},
};

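/*
 * Dispatch a single event: translate the firmware message id, run the
 * matching handler (terminating string payloads first) and report the id
 * back to the command layer via vpu_response_cmd().
 */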
static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *msg)
{
	int ret;
	u32 msg_id;
	struct vpu_msg_handler *handler = NULL;
	unsigned int i;

	ret = vpu_iface_convert_msg_id(inst->core, msg->hdr.id);
	if (ret < 0)
		return -EINVAL;

	msg_id = ret;
	dev_dbg(inst->dev, "[%d] receive event(%s)\n", inst->id, vpu_id_name(msg_id));

	for (i = 0; i < ARRAY_SIZE(handlers); i++) {
		if (handlers[i].id == msg_id) {
			handler = &handlers[i];
			break;
		}
	}

	if (handler) {
		if (handler->is_str)
			vpu_terminate_string_msg(msg);
		if (handler->done)
			handler->done(inst, msg);
	}

	vpu_response_cmd(inst, msg_id, 1);

	return 0;
}

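/*
 * Pull one complete event out of the instance fifo: the fixed-size header
 * first, then hdr.num payload words.
 */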
static bool vpu_inst_receive_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	unsigned long bytes = sizeof(struct vpu_rpc_event_header);
	u32 ret;

	memset(pkt, 0, sizeof(*pkt));
	if (kfifo_len(&inst->msg_fifo) < bytes)
		return false;

	ret = kfifo_out(&inst->msg_fifo, pkt, bytes);
	if (ret != bytes)
		return false;

	if (pkt->hdr.num > 0) {
		bytes = pkt->hdr.num * sizeof(u32);
		ret = kfifo_out(&inst->msg_fifo, pkt->data, bytes);
		if (ret != bytes)
			return false;
	}

	return true;
}

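/* Per-instance work handler: drain and dispatch every queued event. */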
void vpu_inst_run_work(struct work_struct *work)
{
	struct vpu_inst *inst = container_of(work, struct vpu_inst, msg_work);
	struct vpu_rpc_event pkt;

	while (vpu_inst_receive_msg(inst, &pkt))
		vpu_session_handle_msg(inst, &pkt);
}

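/*
 * Copy the event (header plus payload) into the instance fifo and queue
 * the per-instance work so it is handled outside the core message path.
 */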
static void vpu_inst_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
	unsigned long bytes;
	u32 id = pkt->hdr.id;
	int ret;

	if (!inst->workqueue)
		return;

	bytes = sizeof(pkt->hdr) + pkt->hdr.num * sizeof(u32);
	ret = kfifo_in(&inst->msg_fifo, pkt, bytes);
	if (ret != bytes)
		dev_err(inst->dev, "[%d:%d]overflow: %d\n", inst->core->id, inst->id, id);
	queue_work(inst->workqueue, &inst->msg_work);
}

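/*
 * Drain pending events from the firmware interface and route each one to
 * the instance identified by hdr.index.
 */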
static int vpu_handle_msg(struct vpu_core *core)
{
	struct vpu_rpc_event pkt;
	struct vpu_inst *inst;
	int ret;

	memset(&pkt, 0, sizeof(pkt));
	while (!vpu_iface_receive_msg(core, &pkt)) {
		dev_dbg(core->dev, "event index = %d, id = %d, num = %d\n",
			pkt.hdr.index, pkt.hdr.id, pkt.hdr.num);

		ret = vpu_iface_convert_msg_id(core, pkt.hdr.id);
		if (ret < 0)
			continue;

		inst = vpu_core_find_instance(core, pkt.hdr.index);
		if (inst) {
			vpu_response_cmd(inst, ret, 0);
			mutex_lock(&core->cmd_lock);
			vpu_inst_record_flow(inst, ret);
			mutex_unlock(&core->cmd_lock);

			vpu_inst_handle_msg(inst, &pkt);
			vpu_inst_put(inst);
		}
		memset(&pkt, 0, sizeof(pkt));
	}

	return 0;
}

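/*
 * Deferred interrupt handling: the sync request is answered over the
 * mailbox with the rpc buffer offset, the firmware boot address and an
 * INIT_DONE message; boot and snapshot completion need no further action
 * here; anything else indicates firmware messages are pending.
 */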
static int vpu_isr_thread(struct vpu_core *core, u32 irq_code)
{
	dev_dbg(core->dev, "irq code = 0x%x\n", irq_code);
	switch (irq_code) {
	case VPU_IRQ_CODE_SYNC:
		vpu_mbox_send_msg(core, PRC_BUF_OFFSET, core->rpc.phys - core->fw.phys);
		vpu_mbox_send_msg(core, BOOT_ADDRESS, core->fw.phys);
		vpu_mbox_send_msg(core, INIT_DONE, 2);
		break;
	case VPU_IRQ_CODE_BOOT_DONE:
		break;
	case VPU_IRQ_CODE_SNAPSHOT_DONE:
		break;
	default:
		vpu_handle_msg(core);
		break;
	}

	return 0;
}

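/* Drain the core fifo and process every queued irq code. */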
static void vpu_core_run_msg_work(struct vpu_core *core)
{
	const unsigned int SIZE = sizeof(u32);

	while (kfifo_len(&core->msg_fifo) >= SIZE) {
		u32 data = 0;

		if (kfifo_out(&core->msg_fifo, &data, SIZE) == SIZE)
			vpu_isr_thread(core, data);
	}
}

void vpu_msg_run_work(struct work_struct *work)
{
	struct vpu_core *core = container_of(work, struct vpu_core, msg_work);
	unsigned long delay = msecs_to_jiffies(10);

	vpu_core_run_msg_work(core);
	queue_delayed_work(core->workqueue, &core->msg_delayed_work, delay);
}

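/*
 * Delayed follow-up queued by vpu_msg_run_work(): if the core fifo or any
 * instance fifo still holds data, kick the corresponding work again so no
 * message is left behind.
 */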
void vpu_msg_delayed_work(struct work_struct *work)
{
	struct vpu_core *core;
	struct delayed_work *dwork;
	unsigned long bytes = sizeof(u32);
	u32 i;

	if (!work)
		return;

	dwork = to_delayed_work(work);
	core = container_of(dwork, struct vpu_core, msg_delayed_work);
	if (kfifo_len(&core->msg_fifo) >= bytes)
		vpu_core_run_msg_work(core);

	bytes = sizeof(struct vpu_rpc_event_header);
	for (i = 0; i < core->supported_instance_count; i++) {
		struct vpu_inst *inst = vpu_core_find_instance(core, i);

		if (!inst)
			continue;

		if (inst->workqueue && kfifo_len(&inst->msg_fifo) >= bytes)
			queue_work(inst->workqueue, &inst->msg_work);

		vpu_inst_put(inst);
	}
}

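/*
 * Interrupt handler: boot and snapshot completion wake any waiter on
 * core->cmp; the irq code is then queued to the core fifo and handled by
 * vpu_msg_run_work().
 */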
int vpu_isr(struct vpu_core *core, u32 irq)
{
	switch (irq) {
	case VPU_IRQ_CODE_SYNC:
		break;
	case VPU_IRQ_CODE_BOOT_DONE:
		complete(&core->cmp);
		break;
	case VPU_IRQ_CODE_SNAPSHOT_DONE:
		complete(&core->cmp);
		break;
	default:
		break;
	}

	if (kfifo_in(&core->msg_fifo, &irq, sizeof(irq)) != sizeof(irq))
		dev_err(core->dev, "[%d]overflow: %d\n", core->id, irq);
	queue_work(core->workqueue, &core->msg_work);

	return 0;
}