xref: /linux/drivers/media/platform/qcom/venus/helpers.c (revision 8c3854d03bd7b86e8f36e6d9b07b4a6bc20deccd)
1 /*
2  * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
3  * Copyright (C) 2017 Linaro Ltd.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 and
7  * only version 2 as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  */
15 #include <linux/clk.h>
16 #include <linux/iopoll.h>
17 #include <linux/list.h>
18 #include <linux/mutex.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/slab.h>
21 #include <media/videobuf2-dma-sg.h>
22 #include <media/v4l2-mem2mem.h>
23 #include <asm/div64.h>
24 
25 #include "core.h"
26 #include "helpers.h"
27 #include "hfi_helper.h"
28 #include "hfi_venus_io.h"
29 
30 struct intbuf {
31 	struct list_head list;
32 	u32 type;
33 	size_t size;
34 	void *va;
35 	dma_addr_t da;
36 	unsigned long attrs;
37 };
38 
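/*
 * Map @v4l2_pixfmt to its HFI codec and check it against the codec mask the
 * firmware advertised for this session type (enc_codecs for encoder
 * sessions, dec_codecs for decoder sessions).  Unknown pixel formats and
 * unsupported codecs yield false.
 */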
39 bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt)
40 {
41 	struct venus_core *core = inst->core;
42 	u32 session_type = inst->session_type;
43 	u32 codec;
44 
45 	switch (v4l2_pixfmt) {
46 	case V4L2_PIX_FMT_H264:
47 		codec = HFI_VIDEO_CODEC_H264;
48 		break;
49 	case V4L2_PIX_FMT_H263:
50 		codec = HFI_VIDEO_CODEC_H263;
51 		break;
52 	case V4L2_PIX_FMT_MPEG1:
53 		codec = HFI_VIDEO_CODEC_MPEG1;
54 		break;
55 	case V4L2_PIX_FMT_MPEG2:
56 		codec = HFI_VIDEO_CODEC_MPEG2;
57 		break;
58 	case V4L2_PIX_FMT_MPEG4:
59 		codec = HFI_VIDEO_CODEC_MPEG4;
60 		break;
61 	case V4L2_PIX_FMT_VC1_ANNEX_G:
62 	case V4L2_PIX_FMT_VC1_ANNEX_L:
63 		codec = HFI_VIDEO_CODEC_VC1;
64 		break;
65 	case V4L2_PIX_FMT_VP8:
66 		codec = HFI_VIDEO_CODEC_VP8;
67 		break;
68 	case V4L2_PIX_FMT_VP9:
69 		codec = HFI_VIDEO_CODEC_VP9;
70 		break;
71 	case V4L2_PIX_FMT_XVID:
72 		codec = HFI_VIDEO_CODEC_DIVX;
73 		break;
74 	case V4L2_PIX_FMT_HEVC:
75 		codec = HFI_VIDEO_CODEC_HEVC;
76 		break;
77 	default:
78 		return false;
79 	}
80 
81 	if (session_type == VIDC_SESSION_TYPE_ENC && core->enc_codecs & codec)
82 		return true;
83 
84 	if (session_type == VIDC_SESSION_TYPE_DEC && core->dec_codecs & codec)
85 		return true;
86 
87 	return false;
88 }
89 EXPORT_SYMBOL_GPL(venus_helper_check_codec);
90 
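/*
 * Queue every pre-allocated DPB (decoded picture buffer) to the firmware by
 * wrapping it in a struct hfi_frame_data and submitting it with
 * hfi_session_process_buf().
 */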
91 static int venus_helper_queue_dpb_bufs(struct venus_inst *inst)
92 {
93 	struct intbuf *buf;
94 	int ret = 0;
95 
96 	list_for_each_entry(buf, &inst->dpbbufs, list) {
97 		struct hfi_frame_data fdata;
98 
99 		memset(&fdata, 0, sizeof(fdata));
100 		fdata.alloc_len = buf->size;
101 		fdata.device_addr = buf->da;
102 		fdata.buffer_type = buf->type;
103 
104 		ret = hfi_session_process_buf(inst, &fdata);
105 		if (ret)
106 			goto fail;
107 	}
108 
109 fail:
110 	return ret;
111 }
112 
113 int venus_helper_free_dpb_bufs(struct venus_inst *inst)
114 {
115 	struct intbuf *buf, *n;
116 
117 	list_for_each_entry_safe(buf, n, &inst->dpbbufs, list) {
118 		list_del_init(&buf->list);
119 		dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
120 			       buf->attrs);
121 		kfree(buf);
122 	}
123 
124 	INIT_LIST_HEAD(&inst->dpbbufs);
125 
126 	return 0;
127 }
128 EXPORT_SYMBOL_GPL(venus_helper_free_dpb_bufs);
129 
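/*
 * Allocate the DPB buffers the firmware decodes into.  Nothing is done when
 * the instance has no DPB format set.  The buffer count comes from the
 * minimum count in the firmware buffer requirements and the size from the
 * output/output2 buffer size matching inst->dpb_buftype.  The buffers are
 * DMA memory, write-combined and without a kernel mapping, and are tracked
 * on inst->dpbbufs.
 */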
130 int venus_helper_alloc_dpb_bufs(struct venus_inst *inst)
131 {
132 	struct venus_core *core = inst->core;
133 	struct device *dev = core->dev;
134 	enum hfi_version ver = core->res->hfi_version;
135 	struct hfi_buffer_requirements bufreq;
136 	u32 buftype = inst->dpb_buftype;
137 	unsigned int dpb_size = 0;
138 	struct intbuf *buf;
139 	unsigned int i;
140 	u32 count;
141 	int ret;
142 
143 	/* no dpb format set, no need to allocate dpb buffers */
144 	if (!inst->dpb_fmt)
145 		return 0;
146 
147 	if (inst->dpb_buftype == HFI_BUFFER_OUTPUT)
148 		dpb_size = inst->output_buf_size;
149 	else if (inst->dpb_buftype == HFI_BUFFER_OUTPUT2)
150 		dpb_size = inst->output2_buf_size;
151 
152 	if (!dpb_size)
153 		return 0;
154 
155 	ret = venus_helper_get_bufreq(inst, buftype, &bufreq);
156 	if (ret)
157 		return ret;
158 
159 	count = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
160 
161 	for (i = 0; i < count; i++) {
162 		buf = kzalloc(sizeof(*buf), GFP_KERNEL);
163 		if (!buf) {
164 			ret = -ENOMEM;
165 			goto fail;
166 		}
167 
168 		buf->type = buftype;
169 		buf->size = dpb_size;
170 		buf->attrs = DMA_ATTR_WRITE_COMBINE |
171 			     DMA_ATTR_NO_KERNEL_MAPPING;
172 		buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL,
173 					  buf->attrs);
174 		if (!buf->va) {
175 			kfree(buf);
176 			ret = -ENOMEM;
177 			goto fail;
178 		}
179 
180 		list_add_tail(&buf->list, &inst->dpbbufs);
181 	}
182 
183 	return 0;
184 
185 fail:
186 	venus_helper_free_dpb_bufs(inst);
187 	return ret;
188 }
189 EXPORT_SYMBOL_GPL(venus_helper_alloc_dpb_bufs);
190 
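/*
 * Allocate all internal (scratch/persist) buffers of the given @type and
 * register them with the firmware.  Count and size are taken from the
 * firmware buffer requirements; a type that is not reported, or reported
 * with a zero size, simply needs no buffers.
 */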
191 static int intbufs_set_buffer(struct venus_inst *inst, u32 type)
192 {
193 	struct venus_core *core = inst->core;
194 	struct device *dev = core->dev;
195 	struct hfi_buffer_requirements bufreq;
196 	struct hfi_buffer_desc bd;
197 	struct intbuf *buf;
198 	unsigned int i;
199 	int ret;
200 
201 	ret = venus_helper_get_bufreq(inst, type, &bufreq);
202 	if (ret)
203 		return 0; /* this buffer type is not reported, no buffers needed */
204 
205 	if (!bufreq.size)
206 		return 0;
207 
208 	for (i = 0; i < bufreq.count_actual; i++) {
209 		buf = kzalloc(sizeof(*buf), GFP_KERNEL);
210 		if (!buf) {
211 			ret = -ENOMEM;
212 			goto fail;
213 		}
214 
215 		buf->type = bufreq.type;
216 		buf->size = bufreq.size;
217 		buf->attrs = DMA_ATTR_WRITE_COMBINE |
218 			     DMA_ATTR_NO_KERNEL_MAPPING;
219 		buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL,
220 					  buf->attrs);
221 		if (!buf->va) {
222 			ret = -ENOMEM;
223 			goto fail;
224 		}
225 
226 		memset(&bd, 0, sizeof(bd));
227 		bd.buffer_size = buf->size;
228 		bd.buffer_type = buf->type;
229 		bd.num_buffers = 1;
230 		bd.device_addr = buf->da;
231 
232 		ret = hfi_session_set_buffers(inst, &bd);
233 		if (ret) {
234 			dev_err(dev, "set session buffers failed\n");
235 			goto dma_free;
236 		}
237 
238 		list_add_tail(&buf->list, &inst->internalbufs);
239 	}
240 
241 	return 0;
242 
243 dma_free:
244 	dma_free_attrs(dev, buf->size, buf->va, buf->da, buf->attrs);
245 fail:
246 	kfree(buf);
247 	return ret;
248 }
249 
250 static int intbufs_unset_buffers(struct venus_inst *inst)
251 {
252 	struct hfi_buffer_desc bd = {0};
253 	struct intbuf *buf, *n;
254 	int ret = 0;
255 
256 	list_for_each_entry_safe(buf, n, &inst->internalbufs, list) {
257 		bd.buffer_size = buf->size;
258 		bd.buffer_type = buf->type;
259 		bd.num_buffers = 1;
260 		bd.device_addr = buf->da;
261 		bd.response_required = true;
262 
263 		ret = hfi_session_unset_buffers(inst, &bd);
264 
265 		list_del_init(&buf->list);
266 		dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
267 			       buf->attrs);
268 		kfree(buf);
269 	}
270 
271 	return ret;
272 }
273 
274 static const unsigned int intbuf_types_1xx[] = {
275 	HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_1XX),
276 	HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_1XX),
277 	HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_1XX),
278 	HFI_BUFFER_INTERNAL_PERSIST,
279 	HFI_BUFFER_INTERNAL_PERSIST_1,
280 };
281 
282 static const unsigned int intbuf_types_4xx[] = {
283 	HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_4XX),
284 	HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_4XX),
285 	HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_4XX),
286 	HFI_BUFFER_INTERNAL_PERSIST,
287 	HFI_BUFFER_INTERNAL_PERSIST_1,
288 };
289 
290 static int intbufs_alloc(struct venus_inst *inst)
291 {
292 	const unsigned int *intbuf;
293 	size_t arr_sz, i;
294 	int ret;
295 
296 	if (IS_V4(inst->core)) {
297 		arr_sz = ARRAY_SIZE(intbuf_types_4xx);
298 		intbuf = intbuf_types_4xx;
299 	} else {
300 		arr_sz = ARRAY_SIZE(intbuf_types_1xx);
301 		intbuf = intbuf_types_1xx;
302 	}
303 
304 	for (i = 0; i < arr_sz; i++) {
305 		ret = intbufs_set_buffer(inst, intbuf[i]);
306 		if (ret)
307 			goto error;
308 	}
309 
310 	return 0;
311 
312 error:
313 	intbufs_unset_buffers(inst);
314 	return ret;
315 }
316 
317 static int intbufs_free(struct venus_inst *inst)
318 {
319 	return intbufs_unset_buffers(inst);
320 }
321 
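/*
 * Per-instance load estimate in macroblocks per second: the number of 16x16
 * macroblocks covering the aligned frame multiplied by the frame rate.
 * Instances that are not between INIT and STOP contribute no load.
 */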
322 static u32 load_per_instance(struct venus_inst *inst)
323 {
324 	u32 mbs;
325 
326 	if (!inst || !(inst->state >= INST_INIT && inst->state < INST_STOP))
327 		return 0;
328 
329 	mbs = (ALIGN(inst->width, 16) / 16) * (ALIGN(inst->height, 16) / 16);
330 
331 	return mbs * inst->fps;
332 }
333 
334 static u32 load_per_type(struct venus_core *core, u32 session_type)
335 {
336 	struct venus_inst *inst = NULL;
337 	u32 mbs_per_sec = 0;
338 
339 	mutex_lock(&core->lock);
340 	list_for_each_entry(inst, &core->instances, list) {
341 		if (inst->session_type != session_type)
342 			continue;
343 
344 		mbs_per_sec += load_per_instance(inst);
345 	}
346 	mutex_unlock(&core->lock);
347 
348 	return mbs_per_sec;
349 }
350 
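/*
 * Scale the Venus clocks to the aggregated encoder + decoder load.  The
 * frequency table is walked from highest to lowest load and the lowest
 * frequency whose load rating still covers the aggregate macroblock rate is
 * chosen; an idle core falls back to the last (lowest) row.  A warning is
 * printed when the load exceeds the platform maximum.
 */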
351 static int load_scale_clocks(struct venus_core *core)
352 {
353 	const struct freq_tbl *table = core->res->freq_tbl;
354 	unsigned int num_rows = core->res->freq_tbl_size;
355 	unsigned long freq = table[0].freq;
356 	struct clk *clk = core->clks[0];
357 	struct device *dev = core->dev;
358 	u32 mbs_per_sec;
359 	unsigned int i;
360 	int ret;
361 
362 	mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) +
363 		      load_per_type(core, VIDC_SESSION_TYPE_DEC);
364 
365 	if (mbs_per_sec > core->res->max_load)
366 		dev_warn(dev, "HW is overloaded, needed: %u max: %u\n",
367 			 mbs_per_sec, core->res->max_load);
368 
369 	if (!mbs_per_sec && num_rows > 1) {
370 		freq = table[num_rows - 1].freq;
371 		goto set_freq;
372 	}
373 
374 	for (i = 0; i < num_rows; i++) {
375 		if (mbs_per_sec > table[i].load)
376 			break;
377 		freq = table[i].freq;
378 	}
379 
380 set_freq:
381 
382 	ret = clk_set_rate(clk, freq);
383 	if (ret)
384 		goto err;
385 
386 	ret = clk_set_rate(core->core0_clk, freq);
387 	if (ret)
388 		goto err;
389 
390 	ret = clk_set_rate(core->core1_clk, freq);
391 	if (ret)
392 		goto err;
393 
394 	return 0;
395 
396 err:
397 	dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret);
398 	return ret;
399 }
400 
401 static void fill_buffer_desc(const struct venus_buffer *buf,
402 			     struct hfi_buffer_desc *bd, bool response)
403 {
404 	memset(bd, 0, sizeof(*bd));
405 	bd->buffer_type = HFI_BUFFER_OUTPUT;
406 	bd->buffer_size = buf->size;
407 	bd->num_buffers = 1;
408 	bd->device_addr = buf->dma_addr;
409 	bd->response_required = response;
410 }
411 
412 static void return_buf_error(struct venus_inst *inst,
413 			     struct vb2_v4l2_buffer *vbuf)
414 {
415 	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
416 
417 	if (vbuf->vb2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
418 		v4l2_m2m_src_buf_remove_by_buf(m2m_ctx, vbuf);
419 	else
420 		v4l2_m2m_dst_buf_remove_by_buf(m2m_ctx, vbuf);
421 
422 	v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
423 }
424 
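/*
 * Translate a vb2 buffer into a struct hfi_frame_data and queue it to the
 * firmware.  The timestamp is converted from nanoseconds to microseconds.
 * OUTPUT (to-device) buffers carry the payload length and data offset and
 * are flagged EOS when V4L2_BUF_FLAG_LAST is set or the payload is empty;
 * CAPTURE buffers use HFI_BUFFER_OUTPUT for encoder sessions and the
 * decoder's OPB buffer type otherwise.
 */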
425 static int
426 session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
427 {
428 	struct venus_buffer *buf = to_venus_buffer(vbuf);
429 	struct vb2_buffer *vb = &vbuf->vb2_buf;
430 	unsigned int type = vb->type;
431 	struct hfi_frame_data fdata;
432 	int ret;
433 
434 	memset(&fdata, 0, sizeof(fdata));
435 	fdata.alloc_len = buf->size;
436 	fdata.device_addr = buf->dma_addr;
437 	fdata.timestamp = vb->timestamp;
438 	do_div(fdata.timestamp, NSEC_PER_USEC);
439 	fdata.flags = 0;
440 	fdata.clnt_data = vbuf->vb2_buf.index;
441 
442 	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
443 		fdata.buffer_type = HFI_BUFFER_INPUT;
444 		fdata.filled_len = vb2_get_plane_payload(vb, 0);
445 		fdata.offset = vb->planes[0].data_offset;
446 
447 		if (vbuf->flags & V4L2_BUF_FLAG_LAST || !fdata.filled_len)
448 			fdata.flags |= HFI_BUFFERFLAG_EOS;
449 	} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
450 		if (inst->session_type == VIDC_SESSION_TYPE_ENC)
451 			fdata.buffer_type = HFI_BUFFER_OUTPUT;
452 		else
453 			fdata.buffer_type = inst->opb_buftype;
454 		fdata.filled_len = 0;
455 		fdata.offset = 0;
456 	}
457 
458 	ret = hfi_session_process_buf(inst, &fdata);
459 	if (ret)
460 		return ret;
461 
462 	return 0;
463 }
464 
465 static bool is_dynamic_bufmode(struct venus_inst *inst)
466 {
467 	struct venus_core *core = inst->core;
468 	struct venus_caps *caps;
469 
470 	/*
471 	 * v4 doesn't send BUFFER_ALLOC_MODE_SUPPORTED property and supports
472 	 * dynamic buffer mode by default for HFI_BUFFER_OUTPUT/OUTPUT2.
473 	 */
474 	if (IS_V4(core))
475 		return true;
476 
477 	caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
478 	if (!caps)
479 		return false;
480 
481 	return caps->cap_bufs_mode_dynamic;
482 }
483 
484 static int session_unregister_bufs(struct venus_inst *inst)
485 {
486 	struct venus_buffer *buf, *n;
487 	struct hfi_buffer_desc bd;
488 	int ret = 0;
489 
490 	if (is_dynamic_bufmode(inst))
491 		return 0;
492 
493 	list_for_each_entry_safe(buf, n, &inst->registeredbufs, reg_list) {
494 		fill_buffer_desc(buf, &bd, true);
495 		ret = hfi_session_unset_buffers(inst, &bd);
496 		list_del_init(&buf->reg_list);
497 	}
498 
499 	return ret;
500 }
501 
502 static int session_register_bufs(struct venus_inst *inst)
503 {
504 	struct venus_core *core = inst->core;
505 	struct device *dev = core->dev;
506 	struct hfi_buffer_desc bd;
507 	struct venus_buffer *buf;
508 	int ret = 0;
509 
510 	if (is_dynamic_bufmode(inst))
511 		return 0;
512 
513 	list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
514 		fill_buffer_desc(buf, &bd, false);
515 		ret = hfi_session_set_buffers(inst, &bd);
516 		if (ret) {
517 			dev_err(dev, "%s: set buffer failed\n", __func__);
518 			break;
519 		}
520 	}
521 
522 	return ret;
523 }
524 
525 static u32 to_hfi_raw_fmt(u32 v4l2_fmt)
526 {
527 	switch (v4l2_fmt) {
528 	case V4L2_PIX_FMT_NV12:
529 		return HFI_COLOR_FORMAT_NV12;
530 	case V4L2_PIX_FMT_NV21:
531 		return HFI_COLOR_FORMAT_NV21;
532 	default:
533 		break;
534 	}
535 
536 	return 0;
537 }
538 
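/*
 * Query the firmware buffer requirements and copy the entry matching @type
 * into @req.  Returns -EINVAL when the firmware did not report the
 * requested buffer type.
 */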
539 int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
540 			    struct hfi_buffer_requirements *req)
541 {
542 	u32 ptype = HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS;
543 	union hfi_get_property hprop;
544 	unsigned int i;
545 	int ret;
546 
547 	if (req)
548 		memset(req, 0, sizeof(*req));
549 
550 	ret = hfi_session_get_property(inst, ptype, &hprop);
551 	if (ret)
552 		return ret;
553 
554 	ret = -EINVAL;
555 
556 	for (i = 0; i < HFI_BUFFER_TYPE_MAX; i++) {
557 		if (hprop.bufreq[i].type != type)
558 			continue;
559 
560 		if (req)
561 			memcpy(req, &hprop.bufreq[i], sizeof(*req));
562 		ret = 0;
563 		break;
564 	}
565 
566 	return ret;
567 }
568 EXPORT_SYMBOL_GPL(venus_helper_get_bufreq);
569 
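/*
 * Linear NV12 frame size: 128-byte aligned strides, a 32-line aligned luma
 * plane and a 16-line aligned chroma plane plus padding, rounded up to 4K.
 */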
570 static u32 get_framesize_raw_nv12(u32 width, u32 height)
571 {
572 	u32 y_stride, uv_stride, y_plane;
573 	u32 y_sclines, uv_sclines, uv_plane;
574 	u32 size;
575 
576 	y_stride = ALIGN(width, 128);
577 	uv_stride = ALIGN(width, 128);
578 	y_sclines = ALIGN(height, 32);
579 	uv_sclines = ALIGN(((height + 1) >> 1), 16);
580 
581 	y_plane = y_stride * y_sclines;
582 	uv_plane = uv_stride * uv_sclines + SZ_4K;
583 	size = y_plane + uv_plane + SZ_8K;
584 
585 	return ALIGN(size, SZ_4K);
586 }
587 
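/*
 * UBWC NV12 frame size: compressed luma and chroma planes plus their
 * metadata planes and extradata, each 4K aligned.
 */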
588 static u32 get_framesize_raw_nv12_ubwc(u32 width, u32 height)
589 {
590 	u32 y_meta_stride, y_meta_plane;
591 	u32 y_stride, y_plane;
592 	u32 uv_meta_stride, uv_meta_plane;
593 	u32 uv_stride, uv_plane;
594 	u32 extradata = SZ_16K;
595 
596 	y_meta_stride = ALIGN(DIV_ROUND_UP(width, 32), 64);
597 	y_meta_plane = y_meta_stride * ALIGN(DIV_ROUND_UP(height, 8), 16);
598 	y_meta_plane = ALIGN(y_meta_plane, SZ_4K);
599 
600 	y_stride = ALIGN(width, 128);
601 	y_plane = ALIGN(y_stride * ALIGN(height, 32), SZ_4K);
602 
603 	uv_meta_stride = ALIGN(DIV_ROUND_UP(width / 2, 16), 64);
604 	uv_meta_plane = uv_meta_stride * ALIGN(DIV_ROUND_UP(height / 2, 8), 16);
605 	uv_meta_plane = ALIGN(uv_meta_plane, SZ_4K);
606 
607 	uv_stride = ALIGN(width, 128);
608 	uv_plane = ALIGN(uv_stride * ALIGN(height / 2, 32), SZ_4K);
609 
610 	return ALIGN(y_meta_plane + y_plane + uv_meta_plane + uv_plane +
611 		     max(extradata, y_stride * 48), SZ_4K);
612 }
613 
614 u32 venus_helper_get_framesz_raw(u32 hfi_fmt, u32 width, u32 height)
615 {
616 	switch (hfi_fmt) {
617 	case HFI_COLOR_FORMAT_NV12:
618 	case HFI_COLOR_FORMAT_NV21:
619 		return get_framesize_raw_nv12(width, height);
620 	case HFI_COLOR_FORMAT_NV12_UBWC:
621 		return get_framesize_raw_nv12_ubwc(width, height);
622 	default:
623 		return 0;
624 	}
625 }
626 EXPORT_SYMBOL_GPL(venus_helper_get_framesz_raw);
627 
628 u32 venus_helper_get_framesz(u32 v4l2_fmt, u32 width, u32 height)
629 {
630 	u32 hfi_fmt, sz;
631 	bool compressed;
632 
633 	switch (v4l2_fmt) {
634 	case V4L2_PIX_FMT_MPEG:
635 	case V4L2_PIX_FMT_H264:
636 	case V4L2_PIX_FMT_H264_NO_SC:
637 	case V4L2_PIX_FMT_H264_MVC:
638 	case V4L2_PIX_FMT_H263:
639 	case V4L2_PIX_FMT_MPEG1:
640 	case V4L2_PIX_FMT_MPEG2:
641 	case V4L2_PIX_FMT_MPEG4:
642 	case V4L2_PIX_FMT_XVID:
643 	case V4L2_PIX_FMT_VC1_ANNEX_G:
644 	case V4L2_PIX_FMT_VC1_ANNEX_L:
645 	case V4L2_PIX_FMT_VP8:
646 	case V4L2_PIX_FMT_VP9:
647 	case V4L2_PIX_FMT_HEVC:
648 		compressed = true;
649 		break;
650 	default:
651 		compressed = false;
652 		break;
653 	}
654 
655 	if (compressed) {
656 		sz = ALIGN(height, 32) * ALIGN(width, 32) * 3 / 2 / 2;
657 		return ALIGN(sz, SZ_4K);
658 	}
659 
660 	hfi_fmt = to_hfi_raw_fmt(v4l2_fmt);
661 	if (!hfi_fmt)
662 		return 0;
663 
664 	return venus_helper_get_framesz_raw(hfi_fmt, width, height);
665 }
666 EXPORT_SYMBOL_GPL(venus_helper_get_framesz);
667 
668 int venus_helper_set_input_resolution(struct venus_inst *inst,
669 				      unsigned int width, unsigned int height)
670 {
671 	u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
672 	struct hfi_framesize fs;
673 
674 	fs.buffer_type = HFI_BUFFER_INPUT;
675 	fs.width = width;
676 	fs.height = height;
677 
678 	return hfi_session_set_property(inst, ptype, &fs);
679 }
680 EXPORT_SYMBOL_GPL(venus_helper_set_input_resolution);
681 
682 int venus_helper_set_output_resolution(struct venus_inst *inst,
683 				       unsigned int width, unsigned int height,
684 				       u32 buftype)
685 {
686 	u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
687 	struct hfi_framesize fs;
688 
689 	fs.buffer_type = buftype;
690 	fs.width = width;
691 	fs.height = height;
692 
693 	return hfi_session_set_property(inst, ptype, &fs);
694 }
695 EXPORT_SYMBOL_GPL(venus_helper_set_output_resolution);
696 
697 int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode)
698 {
699 	const u32 ptype = HFI_PROPERTY_PARAM_WORK_MODE;
700 	struct hfi_video_work_mode wm;
701 
702 	if (!IS_V4(inst->core))
703 		return 0;
704 
705 	wm.video_work_mode = mode;
706 
707 	return hfi_session_set_property(inst, ptype, &wm);
708 }
709 EXPORT_SYMBOL_GPL(venus_helper_set_work_mode);
710 
711 int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage)
712 {
713 	const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
714 	struct hfi_videocores_usage_type cu;
715 
716 	if (!IS_V4(inst->core))
717 		return 0;
718 
719 	cu.video_core_enable_mask = usage;
720 
721 	return hfi_session_set_property(inst, ptype, &cu);
722 }
723 EXPORT_SYMBOL_GPL(venus_helper_set_core_usage);
724 
725 int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
726 			      unsigned int output_bufs,
727 			      unsigned int output2_bufs)
728 {
729 	u32 ptype = HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
730 	struct hfi_buffer_count_actual buf_count;
731 	int ret;
732 
733 	buf_count.type = HFI_BUFFER_INPUT;
734 	buf_count.count_actual = input_bufs;
735 
736 	ret = hfi_session_set_property(inst, ptype, &buf_count);
737 	if (ret)
738 		return ret;
739 
740 	buf_count.type = HFI_BUFFER_OUTPUT;
741 	buf_count.count_actual = output_bufs;
742 
743 	ret = hfi_session_set_property(inst, ptype, &buf_count);
744 	if (ret)
745 		return ret;
746 
747 	if (output2_bufs) {
748 		buf_count.type = HFI_BUFFER_OUTPUT2;
749 		buf_count.count_actual = output2_bufs;
750 
751 		ret = hfi_session_set_property(inst, ptype, &buf_count);
752 	}
753 
754 	return ret;
755 }
756 EXPORT_SYMBOL_GPL(venus_helper_set_num_bufs);
757 
758 int venus_helper_set_raw_format(struct venus_inst *inst, u32 hfi_format,
759 				u32 buftype)
760 {
761 	const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
762 	struct hfi_uncompressed_format_select fmt;
763 
764 	fmt.buffer_type = buftype;
765 	fmt.format = hfi_format;
766 
767 	return hfi_session_set_property(inst, ptype, &fmt);
768 }
769 EXPORT_SYMBOL_GPL(venus_helper_set_raw_format);
770 
771 int venus_helper_set_color_format(struct venus_inst *inst, u32 pixfmt)
772 {
773 	u32 hfi_format, buftype;
774 
775 	if (inst->session_type == VIDC_SESSION_TYPE_DEC)
776 		buftype = HFI_BUFFER_OUTPUT;
777 	else if (inst->session_type == VIDC_SESSION_TYPE_ENC)
778 		buftype = HFI_BUFFER_INPUT;
779 	else
780 		return -EINVAL;
781 
782 	hfi_format = to_hfi_raw_fmt(pixfmt);
783 	if (!hfi_format)
784 		return -EINVAL;
785 
786 	return venus_helper_set_raw_format(inst, hfi_format, buftype);
787 }
788 EXPORT_SYMBOL_GPL(venus_helper_set_color_format);
789 
790 int venus_helper_set_multistream(struct venus_inst *inst, bool out_en,
791 				 bool out2_en)
792 {
793 	struct hfi_multi_stream multi = {0};
794 	u32 ptype = HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
795 	int ret;
796 
797 	multi.buffer_type = HFI_BUFFER_OUTPUT;
798 	multi.enable = out_en;
799 
800 	ret = hfi_session_set_property(inst, ptype, &multi);
801 	if (ret)
802 		return ret;
803 
804 	multi.buffer_type = HFI_BUFFER_OUTPUT2;
805 	multi.enable = out2_en;
806 
807 	return hfi_session_set_property(inst, ptype, &multi);
808 }
809 EXPORT_SYMBOL_GPL(venus_helper_set_multistream);
810 
811 int venus_helper_set_dyn_bufmode(struct venus_inst *inst)
812 {
813 	const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE;
814 	struct hfi_buffer_alloc_mode mode;
815 	int ret;
816 
817 	if (!is_dynamic_bufmode(inst))
818 		return 0;
819 
820 	mode.type = HFI_BUFFER_OUTPUT;
821 	mode.mode = HFI_BUFFER_MODE_DYNAMIC;
822 
823 	ret = hfi_session_set_property(inst, ptype, &mode);
824 	if (ret)
825 		return ret;
826 
827 	mode.type = HFI_BUFFER_OUTPUT2;
828 
829 	return hfi_session_set_property(inst, ptype, &mode);
830 }
831 EXPORT_SYMBOL_GPL(venus_helper_set_dyn_bufmode);
832 
833 int venus_helper_set_bufsize(struct venus_inst *inst, u32 bufsize, u32 buftype)
834 {
835 	const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL;
836 	struct hfi_buffer_size_actual bufsz;
837 
838 	bufsz.type = buftype;
839 	bufsz.size = bufsize;
840 
841 	return hfi_session_set_property(inst, ptype, &bufsz);
842 }
843 EXPORT_SYMBOL_GPL(venus_helper_set_bufsize);
844 
845 unsigned int venus_helper_get_opb_size(struct venus_inst *inst)
846 {
847 	/* the encoder has only one output */
848 	if (inst->session_type == VIDC_SESSION_TYPE_ENC)
849 		return inst->output_buf_size;
850 
851 	if (inst->opb_buftype == HFI_BUFFER_OUTPUT)
852 		return inst->output_buf_size;
853 	else if (inst->opb_buftype == HFI_BUFFER_OUTPUT2)
854 		return inst->output2_buf_size;
855 
856 	return 0;
857 }
858 EXPORT_SYMBOL_GPL(venus_helper_get_opb_size);
859 
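/*
 * Worker for buffers whose processing was deferred because the firmware
 * still held a reference to them.  Buffers no longer marked
 * HFI_BUFFERFLAG_READONLY are taken off the delayed_process list and queued
 * to the firmware; the worker only acts while both queues are streaming.
 */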
860 static void delayed_process_buf_func(struct work_struct *work)
861 {
862 	struct venus_buffer *buf, *n;
863 	struct venus_inst *inst;
864 	int ret;
865 
866 	inst = container_of(work, struct venus_inst, delayed_process_work);
867 
868 	mutex_lock(&inst->lock);
869 
870 	if (!(inst->streamon_out & inst->streamon_cap))
871 		goto unlock;
872 
873 	list_for_each_entry_safe(buf, n, &inst->delayed_process, ref_list) {
874 		if (buf->flags & HFI_BUFFERFLAG_READONLY)
875 			continue;
876 
877 		ret = session_process_buf(inst, &buf->vb);
878 		if (ret)
879 			return_buf_error(inst, &buf->vb);
880 
881 		list_del_init(&buf->ref_list);
882 	}
883 unlock:
884 	mutex_unlock(&inst->lock);
885 }
886 
887 void venus_helper_release_buf_ref(struct venus_inst *inst, unsigned int idx)
888 {
889 	struct venus_buffer *buf;
890 
891 	list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
892 		if (buf->vb.vb2_buf.index == idx) {
893 			buf->flags &= ~HFI_BUFFERFLAG_READONLY;
894 			schedule_work(&inst->delayed_process_work);
895 			break;
896 		}
897 	}
898 }
899 EXPORT_SYMBOL_GPL(venus_helper_release_buf_ref);
900 
901 void venus_helper_acquire_buf_ref(struct vb2_v4l2_buffer *vbuf)
902 {
903 	struct venus_buffer *buf = to_venus_buffer(vbuf);
904 
905 	buf->flags |= HFI_BUFFERFLAG_READONLY;
906 }
907 EXPORT_SYMBOL_GPL(venus_helper_acquire_buf_ref);
908 
909 static int is_buf_refed(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
910 {
911 	struct venus_buffer *buf = to_venus_buffer(vbuf);
912 
913 	if (buf->flags & HFI_BUFFERFLAG_READONLY) {
914 		list_add_tail(&buf->ref_list, &inst->delayed_process);
915 		schedule_work(&inst->delayed_process_work);
916 		return 1;
917 	}
918 
919 	return 0;
920 }
921 
922 struct vb2_v4l2_buffer *
923 venus_helper_find_buf(struct venus_inst *inst, unsigned int type, u32 idx)
924 {
925 	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
926 
927 	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
928 		return v4l2_m2m_src_buf_remove_by_idx(m2m_ctx, idx);
929 	else
930 		return v4l2_m2m_dst_buf_remove_by_idx(m2m_ctx, idx);
931 }
932 EXPORT_SYMBOL_GPL(venus_helper_find_buf);
933 
934 int venus_helper_vb2_buf_init(struct vb2_buffer *vb)
935 {
936 	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
937 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
938 	struct venus_buffer *buf = to_venus_buffer(vbuf);
939 	struct sg_table *sgt;
940 
941 	sgt = vb2_dma_sg_plane_desc(vb, 0);
942 	if (!sgt)
943 		return -EFAULT;
944 
945 	buf->size = vb2_plane_size(vb, 0);
946 	buf->dma_addr = sg_dma_address(sgt->sgl);
947 
948 	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
949 		list_add_tail(&buf->reg_list, &inst->registeredbufs);
950 
951 	return 0;
952 }
953 EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_init);
954 
955 int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb)
956 {
957 	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
958 	unsigned int out_buf_size = venus_helper_get_opb_size(inst);
959 
960 	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
961 	    vb2_plane_size(vb, 0) < out_buf_size)
962 		return -EINVAL;
963 	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
964 	    vb2_plane_size(vb, 0) < inst->input_buf_size)
965 		return -EINVAL;
966 
967 	return 0;
968 }
969 EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_prepare);
970 
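/*
 * vb2 .buf_queue operation: queue the buffer on the m2m context and, once
 * both queues are streaming, hand it to the firmware.  Buffers still
 * referenced by the firmware are deferred to the delayed-process worker
 * instead.
 */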
971 void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
972 {
973 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
974 	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
975 	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
976 	int ret;
977 
978 	mutex_lock(&inst->lock);
979 
980 	v4l2_m2m_buf_queue(m2m_ctx, vbuf);
981 
982 	if (!(inst->streamon_out & inst->streamon_cap))
983 		goto unlock;
984 
985 	ret = is_buf_refed(inst, vbuf);
986 	if (ret)
987 		goto unlock;
988 
989 	ret = session_process_buf(inst, vbuf);
990 	if (ret)
991 		return_buf_error(inst, vbuf);
992 
993 unlock:
994 	mutex_unlock(&inst->lock);
995 }
996 EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_queue);
997 
998 void venus_helper_buffers_done(struct venus_inst *inst,
999 			       enum vb2_buffer_state state)
1000 {
1001 	struct vb2_v4l2_buffer *buf;
1002 
1003 	while ((buf = v4l2_m2m_src_buf_remove(inst->m2m_ctx)))
1004 		v4l2_m2m_buf_done(buf, state);
1005 	while ((buf = v4l2_m2m_dst_buf_remove(inst->m2m_ctx)))
1006 		v4l2_m2m_buf_done(buf, state);
1007 }
1008 EXPORT_SYMBOL_GPL(venus_helper_buffers_done);
1009 
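/*
 * vb2 .stop_streaming operation.  When both queues were streaming, stop the
 * session, unload its resources, unregister and free the internal and DPB
 * buffers and deinitialize the HFI session, aborting the session if any of
 * that fails.  All queued buffers are returned with VB2_BUF_STATE_ERROR and
 * the streamon flag of the stopped queue is cleared.
 */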
1010 void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
1011 {
1012 	struct venus_inst *inst = vb2_get_drv_priv(q);
1013 	struct venus_core *core = inst->core;
1014 	int ret;
1015 
1016 	mutex_lock(&inst->lock);
1017 
1018 	if (inst->streamon_out & inst->streamon_cap) {
1019 		ret = hfi_session_stop(inst);
1020 		ret |= hfi_session_unload_res(inst);
1021 		ret |= session_unregister_bufs(inst);
1022 		ret |= intbufs_free(inst);
1023 		ret |= hfi_session_deinit(inst);
1024 
1025 		if (inst->session_error || core->sys_error)
1026 			ret = -EIO;
1027 
1028 		if (ret)
1029 			hfi_session_abort(inst);
1030 
1031 		venus_helper_free_dpb_bufs(inst);
1032 
1033 		load_scale_clocks(core);
1034 		INIT_LIST_HEAD(&inst->registeredbufs);
1035 	}
1036 
1037 	venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR);
1038 
1039 	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
1040 		inst->streamon_out = 0;
1041 	else
1042 		inst->streamon_cap = 0;
1043 
1044 	mutex_unlock(&inst->lock);
1045 }
1046 EXPORT_SYMBOL_GPL(venus_helper_vb2_stop_streaming);
1047 
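/*
 * Bring the session up: allocate and register the internal buffers, scale
 * the clocks, load the session resources, start the session and queue the
 * DPB buffers.  On failure the steps already taken are unwound in reverse
 * order.
 */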
1048 int venus_helper_vb2_start_streaming(struct venus_inst *inst)
1049 {
1050 	struct venus_core *core = inst->core;
1051 	int ret;
1052 
1053 	ret = intbufs_alloc(inst);
1054 	if (ret)
1055 		return ret;
1056 
1057 	ret = session_register_bufs(inst);
1058 	if (ret)
1059 		goto err_bufs_free;
1060 
1061 	load_scale_clocks(core);
1062 
1063 	ret = hfi_session_load_res(inst);
1064 	if (ret)
1065 		goto err_unreg_bufs;
1066 
1067 	ret = hfi_session_start(inst);
1068 	if (ret)
1069 		goto err_unload_res;
1070 
1071 	ret = venus_helper_queue_dpb_bufs(inst);
1072 	if (ret)
1073 		goto err_session_stop;
1074 
1075 	return 0;
1076 
1077 err_session_stop:
1078 	hfi_session_stop(inst);
1079 err_unload_res:
1080 	hfi_session_unload_res(inst);
1081 err_unreg_bufs:
1082 	session_unregister_bufs(inst);
1083 err_bufs_free:
1084 	intbufs_free(inst);
1085 	return ret;
1086 }
1087 EXPORT_SYMBOL_GPL(venus_helper_vb2_start_streaming);
1088 
1089 void venus_helper_m2m_device_run(void *priv)
1090 {
1091 	struct venus_inst *inst = priv;
1092 	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
1093 	struct v4l2_m2m_buffer *buf, *n;
1094 	int ret;
1095 
1096 	mutex_lock(&inst->lock);
1097 
1098 	v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buf, n) {
1099 		ret = session_process_buf(inst, &buf->vb);
1100 		if (ret)
1101 			return_buf_error(inst, &buf->vb);
1102 	}
1103 
1104 	v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
1105 		ret = session_process_buf(inst, &buf->vb);
1106 		if (ret)
1107 			return_buf_error(inst, &buf->vb);
1108 	}
1109 
1110 	mutex_unlock(&inst->lock);
1111 }
1112 EXPORT_SYMBOL_GPL(venus_helper_m2m_device_run);
1113 
1114 void venus_helper_m2m_job_abort(void *priv)
1115 {
1116 	struct venus_inst *inst = priv;
1117 
1118 	v4l2_m2m_job_finish(inst->m2m_dev, inst->m2m_ctx);
1119 }
1120 EXPORT_SYMBOL_GPL(venus_helper_m2m_job_abort);
1121 
1122 void venus_helper_init_instance(struct venus_inst *inst)
1123 {
1124 	if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
1125 		INIT_LIST_HEAD(&inst->delayed_process);
1126 		INIT_WORK(&inst->delayed_process_work,
1127 			  delayed_process_buf_func);
1128 	}
1129 }
1130 EXPORT_SYMBOL_GPL(venus_helper_init_instance);
1131 
1132 static bool find_fmt_from_caps(struct venus_caps *caps, u32 buftype, u32 fmt)
1133 {
1134 	unsigned int i;
1135 
1136 	for (i = 0; i < caps->num_fmts; i++) {
1137 		if (caps->fmts[i].buftype == buftype &&
1138 		    caps->fmts[i].fmt == fmt)
1139 			return true;
1140 	}
1141 
1142 	return false;
1143 }
1144 
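/*
 * Choose the HFI color format(s) for the decoder output.  With @ubwc set
 * and both formats supported, OUTPUT carries the UBWC variant and OUTPUT2
 * the matching linear format; otherwise the linear format goes to whichever
 * single buffer type (OUTPUT or OUTPUT2) supports it.  Returns -EINVAL when
 * no suitable format is found.
 */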
1145 int venus_helper_get_out_fmts(struct venus_inst *inst, u32 v4l2_fmt,
1146 			      u32 *out_fmt, u32 *out2_fmt, bool ubwc)
1147 {
1148 	struct venus_core *core = inst->core;
1149 	struct venus_caps *caps;
1150 	u32 ubwc_fmt, fmt = to_hfi_raw_fmt(v4l2_fmt);
1151 	bool found, found_ubwc;
1152 
1153 	*out_fmt = *out2_fmt = 0;
1154 
1155 	if (!fmt)
1156 		return -EINVAL;
1157 
1158 	caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
1159 	if (!caps)
1160 		return -EINVAL;
1161 
1162 	if (ubwc) {
1163 		ubwc_fmt = fmt | HFI_COLOR_FORMAT_UBWC_BASE;
1164 		found_ubwc = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT,
1165 						ubwc_fmt);
1166 		found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);
1167 
1168 		if (found_ubwc && found) {
1169 			*out_fmt = ubwc_fmt;
1170 			*out2_fmt = fmt;
1171 			return 0;
1172 		}
1173 	}
1174 
1175 	found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT, fmt);
1176 	if (found) {
1177 		*out_fmt = fmt;
1178 		*out2_fmt = 0;
1179 		return 0;
1180 	}
1181 
1182 	found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);
1183 	if (found) {
1184 		*out_fmt = 0;
1185 		*out2_fmt = fmt;
1186 		return 0;
1187 	}
1188 
1189 	return -EINVAL;
1190 }
1191 EXPORT_SYMBOL_GPL(venus_helper_get_out_fmts);
1192 
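/*
 * Control hardware power collapse of the encoder/decoder core.  A no-op on
 * Venus 1xx/2xx.  On 3xx the wrapper power-control register is written
 * directly (0 when enabling, 1 when disabling); on 4xx the MMCC
 * power-control register is written and the status register is polled until
 * BIT(1) reflects the requested state.
 */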
1193 int venus_helper_power_enable(struct venus_core *core, u32 session_type,
1194 			      bool enable)
1195 {
1196 	void __iomem *ctrl, *stat;
1197 	u32 val;
1198 	int ret;
1199 
1200 	if (!IS_V3(core) && !IS_V4(core))
1201 		return 0;
1202 
1203 	if (IS_V3(core)) {
1204 		if (session_type == VIDC_SESSION_TYPE_DEC)
1205 			ctrl = core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL;
1206 		else
1207 			ctrl = core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL;
1208 		if (enable)
1209 			writel(0, ctrl);
1210 		else
1211 			writel(1, ctrl);
1212 
1213 		return 0;
1214 	}
1215 
1216 	if (session_type == VIDC_SESSION_TYPE_DEC) {
1217 		ctrl = core->base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL;
1218 		stat = core->base + WRAPPER_VCODEC0_MMCC_POWER_STATUS;
1219 	} else {
1220 		ctrl = core->base + WRAPPER_VCODEC1_MMCC_POWER_CONTROL;
1221 		stat = core->base + WRAPPER_VCODEC1_MMCC_POWER_STATUS;
1222 	}
1223 
1224 	if (enable) {
1225 		writel(0, ctrl);
1226 
1227 		ret = readl_poll_timeout(stat, val, val & BIT(1), 1, 100);
1228 		if (ret)
1229 			return ret;
1230 	} else {
1231 		writel(1, ctrl);
1232 
1233 		ret = readl_poll_timeout(stat, val, !(val & BIT(1)), 1, 100);
1234 		if (ret)
1235 			return ret;
1236 	}
1237 
1238 	return 0;
1239 }
1240 EXPORT_SYMBOL_GPL(venus_helper_power_enable);
1241