xref: /linux/drivers/media/platform/amphion/vpu_helpers.c (revision 3d0fe49454652117522f60bfbefb978ba0e5300b)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020-2021 NXP
 */

#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "vpu.h"
#include "vpu_defs.h"
#include "vpu_core.h"
#include "vpu_rpc.h"
#include "vpu_helpers.h"

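/*
 * Return the index of @x in @array, or 0 when @x is not present.  A miss is
 * therefore indistinguishable from a hit on the first entry; callers can
 * treat index 0 as a safe default.
 */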
int vpu_helper_find_in_array_u8(const u8 *array, u32 size, u32 x)
{
	int i;

	for (i = 0; i < size; i++) {
		if (array[i] == x)
			return i;
	}

	return 0;
}

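/*
 * Check whether the instance exposes at least one format of the given @type
 * that the firmware interface accepts (vpu_iface_check_format()).
 */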
bool vpu_helper_check_type(struct vpu_inst *inst, u32 type)
{
	const struct vpu_format *pfmt;

	for (pfmt = inst->formats; pfmt->pixfmt; pfmt++) {
		if (!vpu_iface_check_format(inst, pfmt->pixfmt))
			continue;
		if (pfmt->type == type)
			return true;
	}

	return false;
}

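/*
 * Look up the format table entry for @pixelfmt, optionally restricted to
 * @type (a @type of 0 matches any type).  Returns NULL if the instance has
 * no format table or the firmware interface rejects the pixel format.
 */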
const struct vpu_format *vpu_helper_find_format(struct vpu_inst *inst, u32 type, u32 pixelfmt)
{
	const struct vpu_format *pfmt;

	if (!inst || !inst->formats)
		return NULL;

	if (!vpu_iface_check_format(inst, pixelfmt))
		return NULL;

	for (pfmt = inst->formats; pfmt->pixfmt; pfmt++) {
		if (pfmt->pixfmt == pixelfmt && (!type || type == pfmt->type))
			return pfmt;
	}

	return NULL;
}

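/*
 * A "sibling" is an alternative table entry describing the same pixel layout
 * (e.g. the contiguous and non-contiguous multi-planar variants of a format).
 * Return it only when the relationship is mutual and the component plane
 * counts agree, so the two formats are truly interchangeable.
 */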
const struct vpu_format *vpu_helper_find_sibling(struct vpu_inst *inst, u32 type, u32 pixelfmt)
{
	const struct vpu_format *fmt;
	const struct vpu_format *sibling;

	fmt = vpu_helper_find_format(inst, type, pixelfmt);
	if (!fmt || !fmt->sibling)
		return NULL;

	sibling = vpu_helper_find_format(inst, type, fmt->sibling);
	if (!sibling || sibling->sibling != fmt->pixfmt ||
	    sibling->comp_planes != fmt->comp_planes)
		return NULL;

	return sibling;
}

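/*
 * Two pixel formats match when they are identical or when they are siblings
 * of each other (see vpu_helper_find_sibling() above).
 */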
bool vpu_helper_match_format(struct vpu_inst *inst, u32 type, u32 fmta, u32 fmtb)
{
	const struct vpu_format *sibling;

	if (fmta == fmtb)
		return true;

	sibling = vpu_helper_find_sibling(inst, type, fmta);
	if (sibling && sibling->pixfmt == fmtb)
		return true;
	return false;
}

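/*
 * Return the @index-th format of the given @type that the firmware interface
 * accepts, or NULL when @index is out of range.  Suitable for backing
 * VIDIOC_ENUM_FMT style enumeration.
 */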
const struct vpu_format *vpu_helper_enum_format(struct vpu_inst *inst, u32 type, int index)
{
	const struct vpu_format *pfmt;
	int i = 0;

	if (!inst || !inst->formats)
		return NULL;

	for (pfmt = inst->formats; pfmt->pixfmt; pfmt++) {
		if (!vpu_iface_check_format(inst, pfmt->pixfmt))
			continue;

		if (pfmt->type == type) {
			if (index == i)
				return pfmt;
			i++;
		}
	}

	return NULL;
}

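/*
 * Clamp @width to the core's supported range and round it up to the width
 * step, using the limits reported by vpu_get_resource().  When no instance
 * or resource is available the value is passed through unchanged.
 * vpu_helper_valid_frame_height() below is the height counterpart.
 */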
u32 vpu_helper_valid_frame_width(struct vpu_inst *inst, u32 width)
{
	const struct vpu_core_resources *res;

	if (!inst)
		return width;

	res = vpu_get_resource(inst);
	if (!res)
		return width;
	if (res->max_width)
		width = clamp(width, res->min_width, res->max_width);
	if (res->step_width)
		width = ALIGN(width, res->step_width);

	return width;
}

u32 vpu_helper_valid_frame_height(struct vpu_inst *inst, u32 height)
{
	const struct vpu_core_resources *res;

	if (!inst)
		return height;

	res = vpu_get_resource(inst);
	if (!res)
		return height;
	if (res->max_height)
		height = clamp(height, res->min_height, res->max_height);
	if (res->step_height)
		height = ALIGN(height, res->step_height);

	return height;
}

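/*
 * Plane size for linear NV12 (4:2:0, semi-planar): plane 0 is luma
 * (bytesperline * height), plane 1 is the interleaved CbCr plane at half
 * that size; the height is rounded up to an even number of lines.  @pbl is
 * an in/out minimum bytesperline: it is honoured as a lower bound and
 * updated with the value actually used.
 */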
static u32 get_nv12_plane_size(u32 width, u32 height, int plane_no,
			       u32 stride, u32 interlaced, u32 *pbl)
{
	u32 bytesperline;
	u32 size = 0;

	bytesperline = width;
	if (pbl)
		bytesperline = max(bytesperline, *pbl);
	bytesperline = ALIGN(bytesperline, stride);
	height = ALIGN(height, 2);
	if (plane_no == 0)
		size = bytesperline * height;
	else if (plane_no == 1)
		size = bytesperline * height >> 1;
	if (pbl)
		*pbl = bytesperline;

	return size;
}

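/*
 * Plane size for the 8x128 tiled NV12 variants: bytesperline is the packed
 * line size (10 bits per sample for the 10-bit formats) aligned to the
 * 8-byte tile width and the requested stride, while the height is aligned
 * to the 128-line tile height (256 lines when interlaced, presumably so
 * each field tiles independently).  The chroma plane is additionally
 * aligned to twice the tile height before being halved.
 */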
static u32 get_tiled_8l128_plane_size(u32 fmt, u32 width, u32 height, int plane_no,
				      u32 stride, u32 interlaced, u32 *pbl)
{
	u32 ws = 3;
	u32 hs = 7;
	u32 bitdepth = 8;
	u32 bytesperline;
	u32 size = 0;

	if (interlaced)
		hs++;
	if (fmt == V4L2_PIX_FMT_NV12M_10BE_8L128 || fmt == V4L2_PIX_FMT_NV12_10BE_8L128)
		bitdepth = 10;
	bytesperline = DIV_ROUND_UP(width * bitdepth, BITS_PER_BYTE);
	if (pbl)
		bytesperline = max(bytesperline, *pbl);
	bytesperline = ALIGN(bytesperline, 1 << ws);
	bytesperline = ALIGN(bytesperline, stride);
	height = ALIGN(height, 1 << hs);
	if (plane_no == 0)
		size = bytesperline * height;
	else if (plane_no == 1)
		size = (bytesperline * ALIGN(height, 1 << (hs + 1))) >> 1;
	if (pbl)
		*pbl = bytesperline;

	return size;
}

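/*
 * Fallback for single-plane formats: only plane 0 has a size, computed as
 * bytesperline * height with the same @pbl in/out handling as above.
 */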
static u32 get_default_plane_size(u32 width, u32 height, int plane_no,
				  u32 stride, u32 interlaced, u32 *pbl)
{
	u32 bytesperline;
	u32 size = 0;

	bytesperline = width;
	if (pbl)
		bytesperline = max(bytesperline, *pbl);
	bytesperline = ALIGN(bytesperline, stride);
	if (plane_no == 0)
		size = bytesperline * height;
	if (pbl)
		*pbl = bytesperline;

	return size;
}

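/*
 * Dispatch the per-plane size calculation on the pixel format; formats the
 * helper does not know about fall back to the simple single-plane formula.
 */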
u32 vpu_helper_get_plane_size(u32 fmt, u32 w, u32 h, int plane_no,
			      u32 stride, u32 interlaced, u32 *pbl)
{
	switch (fmt) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV12M:
		return get_nv12_plane_size(w, h, plane_no, stride, interlaced, pbl);
	case V4L2_PIX_FMT_NV12_8L128:
	case V4L2_PIX_FMT_NV12M_8L128:
	case V4L2_PIX_FMT_NV12_10BE_8L128:
	case V4L2_PIX_FMT_NV12M_10BE_8L128:
		return get_tiled_8l128_plane_size(fmt, w, h, plane_no, stride, interlaced, pbl);
	default:
		return get_default_plane_size(w, h, plane_no, stride, interlaced, pbl);
	}
}

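/*
 * The stream buffer is a ring buffer addressed with device (physical)
 * addresses: @rptr/@wptr live in [phys, phys + length].  The helpers below
 * translate those addresses into offsets from the CPU virtual mapping, wrap
 * around at the end of the buffer and advance the caller's pointer via
 * vpu_helper_step_walk().
 */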
int vpu_helper_copy_from_stream_buffer(struct vpu_buffer *stream_buffer,
				       u32 *rptr, u32 size, void *dst)
{
	u32 offset;
	u32 start;
	u32 end;
	void *virt;

	if (!stream_buffer || !rptr || !dst)
		return -EINVAL;

	if (!size)
		return 0;

	offset = *rptr;
	start = stream_buffer->phys;
	end = start + stream_buffer->length;
	virt = stream_buffer->virt;

	if (offset < start || offset > end)
		return -EINVAL;

	if (offset + size <= end) {
		memcpy(dst, virt + (offset - start), size);
	} else {
		memcpy(dst, virt + (offset - start), end - offset);
		memcpy(dst + end - offset, virt, size + offset - end);
	}

	*rptr = vpu_helper_step_walk(stream_buffer, offset, size);

	return 0;
}

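/*
 * Counterpart of vpu_helper_copy_from_stream_buffer(): copy @size bytes from
 * @src into the ring buffer at *@wptr and advance *@wptr, wrapping as needed.
 *
 * Illustrative sketch only (sb, hdr and hdr_len are hypothetical, not taken
 * from the driver):
 *
 *	struct vpu_buffer *sb = ...;	// the instance's stream buffer
 *	u32 wptr = sb->phys;
 *
 *	if (!vpu_helper_copy_to_stream_buffer(sb, &wptr, hdr_len, hdr))
 *		;	// wptr now points past the copied data, wrapped if needed
 */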
int vpu_helper_copy_to_stream_buffer(struct vpu_buffer *stream_buffer,
				     u32 *wptr, u32 size, void *src)
{
	u32 offset;
	u32 start;
	u32 end;
	void *virt;

	if (!stream_buffer || !wptr || !src)
		return -EINVAL;

	if (!size)
		return 0;

	offset = *wptr;
	start = stream_buffer->phys;
	end = start + stream_buffer->length;
	virt = stream_buffer->virt;
	if (offset < start || offset > end)
		return -EINVAL;

	if (offset + size <= end) {
		memcpy(virt + (offset - start), src, size);
	} else {
		memcpy(virt + (offset - start), src, end - offset);
		memcpy(virt, src + end - offset, size + offset - end);
	}

	*wptr = vpu_helper_step_walk(stream_buffer, offset, size);

	return 0;
}

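/*
 * Like the copy helpers, but fill @size bytes of the ring buffer with @val,
 * advancing *@wptr (with wrap-around) by hand instead of using
 * vpu_helper_step_walk().
 */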
int vpu_helper_memset_stream_buffer(struct vpu_buffer *stream_buffer,
				    u32 *wptr, u8 val, u32 size)
{
	u32 offset;
	u32 start;
	u32 end;
	void *virt;

	if (!stream_buffer || !wptr)
		return -EINVAL;

	if (!size)
		return 0;

	offset = *wptr;
	start = stream_buffer->phys;
	end = start + stream_buffer->length;
	virt = stream_buffer->virt;
	if (offset < start || offset > end)
		return -EINVAL;

	if (offset + size <= end) {
		memset(virt + (offset - start), val, size);
	} else {
		memset(virt + (offset - start), val, end - offset);
		memset(virt, val, size + offset - end);
	}

	offset += size;
	if (offset >= end)
		offset -= stream_buffer->length;

	*wptr = offset;

	return 0;
}

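/*
 * Free/used space in the stream buffer as seen by the firmware's buffer
 * descriptor.  Equal read and write pointers are treated as an empty buffer:
 * the whole buffer is free and nothing is used.
 */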
u32 vpu_helper_get_free_space(struct vpu_inst *inst)
{
	struct vpu_rpc_buffer_desc desc;

	if (vpu_iface_get_stream_buffer_desc(inst, &desc))
		return 0;

	if (desc.rptr > desc.wptr)
		return desc.rptr - desc.wptr;
	else if (desc.rptr < desc.wptr)
		return (desc.end - desc.start + desc.rptr - desc.wptr);
	else
		return desc.end - desc.start;
}

u32 vpu_helper_get_used_space(struct vpu_inst *inst)
{
	struct vpu_rpc_buffer_desc desc;

	if (vpu_iface_get_stream_buffer_desc(inst, &desc))
		return 0;

	if (desc.wptr > desc.rptr)
		return desc.wptr - desc.rptr;
	else if (desc.wptr < desc.rptr)
		return (desc.end - desc.start + desc.wptr - desc.rptr);
	else
		return 0;
}

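/*
 * Volatile control handler: report the instance's current minimum buffer
 * counts for the capture and output queues.
 */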
int vpu_helper_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
	struct vpu_inst *inst = ctrl_to_inst(ctrl);

	switch (ctrl->id) {
	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
		ctrl->val = inst->min_buffer_cap;
		break;
	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
		ctrl->val = inst->min_buffer_out;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

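/*
 * Scan @bytesused bytes of the stream buffer, starting at ring offset
 * @offset, for an H.264 Annex B start code (0x00000001).  Returns the number
 * of bytes between @offset and the start code, -EINVAL when no start code is
 * found, and 0 for pixel formats that carry no start code.
 *
 * Illustrative sketch only (sb and used are hypothetical):
 *
 *	int pos = vpu_helper_find_startcode(sb, V4L2_PIX_FMT_H264,
 *					    sb->phys, used);
 *	if (pos >= 0)
 *		;	// start code begins pos bytes after sb->phys
 */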
int vpu_helper_find_startcode(struct vpu_buffer *stream_buffer,
			      u32 pixelformat, u32 offset, u32 bytesused)
{
	u32 start_code;
	int start_code_size;
	u32 val = 0;
	int i;
	int ret = -EINVAL;

	if (!stream_buffer || !stream_buffer->virt)
		return -EINVAL;

	switch (pixelformat) {
	case V4L2_PIX_FMT_H264:
		start_code_size = 4;
		start_code = 0x00000001;
		break;
	default:
		return 0;
	}

	for (i = 0; i < bytesused; i++) {
		val = (val << 8) | vpu_helper_read_byte(stream_buffer, offset + i);
		if (i < start_code_size - 1)
			continue;
		if (val == start_code) {
			ret = i + 1 - start_code_size;
			break;
		}
	}

	return ret;
}

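/*
 * Linear lookups in a table of {src, dst} pairs, used to translate between
 * two id spaces; vpu_find_src_by_dst() below is the reverse mapping.  Both
 * return -EINVAL when the table is empty or no entry matches.
 */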
int vpu_find_dst_by_src(struct vpu_pair *pairs, u32 cnt, u32 src)
{
	u32 i;

	if (!pairs || !cnt)
		return -EINVAL;

	for (i = 0; i < cnt; i++) {
		if (pairs[i].src == src)
			return pairs[i].dst;
	}

	return -EINVAL;
}

int vpu_find_src_by_dst(struct vpu_pair *pairs, u32 cnt, u32 dst)
{
	u32 i;

	if (!pairs || !cnt)
		return -EINVAL;

	for (i = 0; i < cnt; i++) {
		if (pairs[i].dst == dst)
			return pairs[i].src;
	}

	return -EINVAL;
}

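/*
 * Human-readable names for the command and message ids exchanged with the
 * firmware, intended for log and debugfs output.
 */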
const char *vpu_id_name(u32 id)
{
	switch (id) {
	case VPU_CMD_ID_NOOP: return "noop";
	case VPU_CMD_ID_CONFIGURE_CODEC: return "configure codec";
	case VPU_CMD_ID_START: return "start";
	case VPU_CMD_ID_STOP: return "stop";
	case VPU_CMD_ID_ABORT: return "abort";
	case VPU_CMD_ID_RST_BUF: return "reset buf";
	case VPU_CMD_ID_SNAPSHOT: return "snapshot";
	case VPU_CMD_ID_FIRM_RESET: return "reset firmware";
	case VPU_CMD_ID_UPDATE_PARAMETER: return "update parameter";
	case VPU_CMD_ID_FRAME_ENCODE: return "encode frame";
	case VPU_CMD_ID_SKIP: return "skip";
	case VPU_CMD_ID_FS_ALLOC: return "alloc fb";
	case VPU_CMD_ID_FS_RELEASE: return "release fb";
	case VPU_CMD_ID_TIMESTAMP: return "timestamp";
	case VPU_CMD_ID_DEBUG: return "debug";
	case VPU_MSG_ID_RESET_DONE: return "reset done";
	case VPU_MSG_ID_START_DONE: return "start done";
	case VPU_MSG_ID_STOP_DONE: return "stop done";
	case VPU_MSG_ID_ABORT_DONE: return "abort done";
	case VPU_MSG_ID_BUF_RST: return "buf reset done";
	case VPU_MSG_ID_MEM_REQUEST: return "mem request";
	case VPU_MSG_ID_PARAM_UPD_DONE: return "param upd done";
	case VPU_MSG_ID_FRAME_INPUT_DONE: return "frame input done";
	case VPU_MSG_ID_ENC_DONE: return "encode done";
	case VPU_MSG_ID_DEC_DONE: return "frame display";
	case VPU_MSG_ID_FRAME_REQ: return "fb request";
	case VPU_MSG_ID_FRAME_RELEASE: return "fb release";
	case VPU_MSG_ID_SEQ_HDR_FOUND: return "seq hdr found";
	case VPU_MSG_ID_RES_CHANGE: return "resolution change";
	case VPU_MSG_ID_PIC_HDR_FOUND: return "pic hdr found";
	case VPU_MSG_ID_PIC_DECODED: return "picture decoded";
	case VPU_MSG_ID_PIC_EOS: return "eos";
	case VPU_MSG_ID_FIFO_LOW: return "fifo low";
	case VPU_MSG_ID_BS_ERROR: return "bs error";
	case VPU_MSG_ID_UNSUPPORTED: return "unsupported";
	case VPU_MSG_ID_FIRMWARE_XCPT: return "exception";
	case VPU_MSG_ID_PIC_SKIPPED: return "skipped";
	case VPU_MSG_ID_DBG_MSG: return "debug msg";
	}
	return "<unknown>";
}

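/*
 * Human-readable name of a codec state.  Note that the enumerator for the
 * dynamic-resolution-change state is spelled VPU_CODEC_STATE_DYAMIC_... in
 * the driver headers, so the case label below matches that spelling.
 */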
const char *vpu_codec_state_name(enum vpu_codec_state state)
{
	switch (state) {
	case VPU_CODEC_STATE_DEINIT: return "initialization";
	case VPU_CODEC_STATE_CONFIGURED: return "configured";
	case VPU_CODEC_STATE_START: return "start";
	case VPU_CODEC_STATE_STARTED: return "started";
	case VPU_CODEC_STATE_ACTIVE: return "active";
	case VPU_CODEC_STATE_SEEK: return "seek";
	case VPU_CODEC_STATE_STOP: return "stop";
	case VPU_CODEC_STATE_DRAIN: return "drain";
	case VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE: return "resolution change";
	}
	return "<unknown>";
}