// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 MediaTek Inc.
 * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
 */

#include <linux/mailbox_controller.h>
#include <linux/platform_device.h>
#include "mtk-mdp3-cfg.h"
#include "mtk-mdp3-cmdq.h"
#include "mtk-mdp3-comp.h"
#include "mtk-mdp3-core.h"
#include "mtk-mdp3-m2m.h"
#include "mtk-img-ipi.h"

#define MDP_PATH_MAX_COMPS	IMG_MAX_COMPONENTS

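/*
 * struct mdp_path - runtime state of one frame-processing pipeline
 * @mdp_dev:   MDP driver instance this path belongs to
 * @comps:     per-component contexts, in pipeline order
 * @num_comps: number of entries used in @comps
 * @config:    per-frame configuration taken from the VPU-shared buffer
 * @param:     frame parameters of the request being processed
 * @composes:  compose (target) rectangles, one per hardware output
 * @bounds:    fallback rectangles used when no compose rect is supplied
 */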
struct mdp_path {
	struct mdp_dev		*mdp_dev;
	struct mdp_comp_ctx	comps[MDP_PATH_MAX_COMPS];
	u32			num_comps;
	const struct img_config	*config;
	const struct img_ipi_frameparam *param;
	const struct v4l2_rect	*composes[IMG_MAX_HW_OUTPUTS];
	struct v4l2_rect	bounds[IMG_MAX_HW_OUTPUTS];
};

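/*
 * has_op()/call_op() - invoke an optional mdp_comp_ops callback.
 * call_op() evaluates to 0 when the component does not implement @op,
 * so callers can treat a missing callback as success.
 */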
#define has_op(ctx, op) \
	((ctx)->comp->ops && (ctx)->comp->ops->op)
#define call_op(ctx, op, ...) \
	(has_op(ctx, op) ? (ctx)->comp->ops->op(ctx, ##__VA_ARGS__) : 0)

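/*
 * A component is considered disabled for sub-frame @count when its
 * frame-level output is disabled, or when @count is beyond the number
 * of sub-frames it was configured for.
 */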
static bool is_output_disabled(int p_id, const struct img_compparam *param, u32 count)
{
	u32 num = 0;
	bool dis_output = false;
	bool dis_tile = false;

	if (CFG_CHECK(MT8183, p_id)) {
		num = CFG_COMP(MT8183, param, num_subfrms);
		dis_output = CFG_COMP(MT8183, param, frame.output_disable);
		dis_tile = CFG_COMP(MT8183, param, frame.output_disable);
	} else if (CFG_CHECK(MT8195, p_id)) {
		num = CFG_COMP(MT8195, param, num_subfrms);
		dis_output = CFG_COMP(MT8195, param, frame.output_disable);
		dis_tile = CFG_COMP(MT8195, param, frame.output_disable);
	}

	return (count < num) ? (dis_output || dis_tile) : true;
}

static struct mtk_mutex *__get_mutex(const struct mdp_dev *mdp_dev,
				     const struct mdp_pipe_info *p)
{
	return mdp_dev->mm_subsys[p->sub_id].mdp_mutex[p->mutex_id];
}

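/* Number of parallel pipes to use: dual-pipe bitblt streams use two. */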
static u8 __get_pp_num(enum mdp_stream_type type)
{
	switch (type) {
	case MDP_STREAM_TYPE_DUAL_BITBLT:
		return MDP_PP_USED_2;
	default:
		return MDP_PP_USED_1;
	}
}

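/*
 * Map the first component of a path to the MUTEX pipe that drives it.
 * Unknown components fall back to MDP_PIPE_RDMA0 so that subsequent
 * MUTEX operations still target a valid instance.
 */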
static enum mdp_pipe_id __get_pipe(const struct mdp_dev *mdp_dev,
				   enum mtk_mdp_comp_id id)
{
	enum mdp_pipe_id pipe_id;

	switch (id) {
	case MDP_COMP_RDMA0:
		pipe_id = MDP_PIPE_RDMA0;
		break;
	case MDP_COMP_ISP_IMGI:
		pipe_id = MDP_PIPE_IMGI;
		break;
	case MDP_COMP_WPEI:
		pipe_id = MDP_PIPE_WPEI;
		break;
	case MDP_COMP_WPEI2:
		pipe_id = MDP_PIPE_WPEI2;
		break;
	case MDP_COMP_RDMA1:
		pipe_id = MDP_PIPE_RDMA1;
		break;
	case MDP_COMP_RDMA2:
		pipe_id = MDP_PIPE_RDMA2;
		break;
	case MDP_COMP_RDMA3:
		pipe_id = MDP_PIPE_RDMA3;
		break;
	default:
		/* Avoid exceptions when operating MUTEX */
		pipe_id = MDP_PIPE_RDMA0;
		dev_err(&mdp_dev->pdev->dev, "Unknown pipeline id %d", id);
		break;
	}

	return pipe_id;
}

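/*
 * Return the img_config slot for parallel pipe @pp_idx inside the
 * VPU-shared configuration buffer, or an ERR_PTR() when @pp_idx is out
 * of range or the next slot would exceed the buffer bounds.
 */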
static struct img_config *__get_config_offset(struct mdp_dev *mdp,
					      struct mdp_cmdq_param *param,
					      u8 pp_idx)
{
	const int p_id = mdp->mdp_data->mdp_plat_id;
	struct device *dev = &mdp->pdev->dev;
	void *cfg_c, *cfg_n;
	long bound = mdp->vpu.config_size;

	if (pp_idx >= mdp->mdp_data->pp_used)
		goto err_param;

	if (CFG_CHECK(MT8183, p_id))
		cfg_c = CFG_OFST(MT8183, param->config, pp_idx);
	else if (CFG_CHECK(MT8195, p_id))
		cfg_c = CFG_OFST(MT8195, param->config, pp_idx);
	else
		goto err_param;

	if (CFG_CHECK(MT8183, p_id))
		cfg_n = CFG_OFST(MT8183, param->config, pp_idx + 1);
	else if (CFG_CHECK(MT8195, p_id))
		cfg_n = CFG_OFST(MT8195, param->config, pp_idx + 1);
	else
		goto err_param;

	if ((long)cfg_n - (long)mdp->vpu.config > bound) {
		dev_err(dev, "config offset %ld OOB %ld\n", (long)cfg_n, bound);
		cfg_c = ERR_PTR(-EFAULT);
	}

	return (struct img_config *)cfg_c;

err_param:
	cfg_c = ERR_PTR(-EINVAL);
	return (struct img_config *)cfg_c;
}

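/*
 * Collect the components that participate in sub-frame @count and
 * program them into the MUTEX module list, then select single-SOF
 * triggering. @p is filled with the pipe information chosen for this
 * path.
 */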
static int mdp_path_subfrm_require(const struct mdp_path *path,
				   struct mdp_cmdq_cmd *cmd,
				   struct mdp_pipe_info *p, u32 count)
{
	const int p_id = path->mdp_dev->mdp_data->mdp_plat_id;
	const struct mdp_comp_ctx *ctx;
	const struct mtk_mdp_driver_data *data = path->mdp_dev->mdp_data;
	struct mtk_mutex *mutex;
	int id, index;
	u32 num_comp = 0;

	if (CFG_CHECK(MT8183, p_id))
		num_comp = CFG_GET(MT8183, path->config, num_components);
	else if (CFG_CHECK(MT8195, p_id))
		num_comp = CFG_GET(MT8195, path->config, num_components);

	/* Decide which mutex to use based on the current pipeline */
	index = __get_pipe(path->mdp_dev, path->comps[0].comp->public_id);
	memcpy(p, &data->pipe_info[index], sizeof(struct mdp_pipe_info));
	mutex = __get_mutex(path->mdp_dev, p);

	/* Set mutex mod */
	for (index = 0; index < num_comp; index++) {
		s32 inner_id = MDP_COMP_NONE;
		const u32 *mutex_idx;
		const struct mdp_comp_blend *b;

		if (CFG_CHECK(MT8183, p_id))
			inner_id = CFG_GET(MT8183, path->config, components[index].type);
		else if (CFG_CHECK(MT8195, p_id))
			inner_id = CFG_GET(MT8195, path->config, components[index].type);

		if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
			continue;

		ctx = &path->comps[index];
		if (is_output_disabled(p_id, ctx->param, count))
			continue;

		mutex_idx = data->mdp_mutex_table_idx;
		id = ctx->comp->public_id;
		mtk_mutex_write_mod(mutex, mutex_idx[id], false);

		b = &data->comp_data[id].blend;
		if (b && b->aid_mod)
			mtk_mutex_write_mod(mutex, mutex_idx[b->b_id], false);
	}

	mtk_mutex_write_sof(mutex, MUTEX_SOF_IDX_SINGLE_MODE);

	return 0;
}

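/*
 * Emit the GCE commands that run one sub-frame: clear each engine's
 * SOF event, kick the MUTEX, then wait until every enabled engine has
 * signalled start-of-frame.
 */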
static int mdp_path_subfrm_run(const struct mdp_path *path,
			       struct mdp_cmdq_cmd *cmd,
			       struct mdp_pipe_info *p, u32 count)
{
	const int p_id = path->mdp_dev->mdp_data->mdp_plat_id;
	const struct mdp_comp_ctx *ctx;
	struct device *dev = &path->mdp_dev->pdev->dev;
	struct mtk_mutex *mutex;
	int index;
	u32 num_comp = 0;
	s32 event;
	s32 inner_id = MDP_COMP_NONE;

	if (p->mutex_id == -1) {
		dev_err(dev, "Incorrect mutex id");
		return -EINVAL;
	}

	if (CFG_CHECK(MT8183, p_id))
		num_comp = CFG_GET(MT8183, path->config, num_components);
	else if (CFG_CHECK(MT8195, p_id))
		num_comp = CFG_GET(MT8195, path->config, num_components);

	/* Wait WROT SRAM shared to DISP RDMA */
	/* Clear SOF event for each engine */
	for (index = 0; index < num_comp; index++) {
		if (CFG_CHECK(MT8183, p_id))
			inner_id = CFG_GET(MT8183, path->config, components[index].type);
		else if (CFG_CHECK(MT8195, p_id))
			inner_id = CFG_GET(MT8195, path->config, components[index].type);

		if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
			continue;
		ctx = &path->comps[index];
		if (is_output_disabled(p_id, ctx->param, count))
			continue;
		event = ctx->comp->gce_event[MDP_GCE_EVENT_SOF];
		if (event != MDP_GCE_NO_EVENT)
			MM_REG_CLEAR(cmd, event);
	}

	/* Enable the mutex */
	mutex = __get_mutex(path->mdp_dev, p);
	mtk_mutex_enable_by_cmdq(mutex, (void *)&cmd->pkt);

	/* Wait SOF events and clear mutex modules (optional) */
	for (index = 0; index < num_comp; index++) {
		if (CFG_CHECK(MT8183, p_id))
			inner_id = CFG_GET(MT8183, path->config, components[index].type);
		else if (CFG_CHECK(MT8195, p_id))
			inner_id = CFG_GET(MT8195, path->config, components[index].type);

		if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
			continue;
		ctx = &path->comps[index];
		if (is_output_disabled(p_id, ctx->param, count))
			continue;
		event = ctx->comp->gce_event[MDP_GCE_EVENT_SOF];
		if (event != MDP_GCE_NO_EVENT)
			MM_REG_WAIT(cmd, event);
	}

	return 0;
}

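/*
 * Bind each non-dummy component described in the frame config to a
 * context in @path->comps via mdp_comp_ctx_config().
 */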
static int mdp_path_ctx_init(struct mdp_dev *mdp, struct mdp_path *path)
{
	const int p_id = mdp->mdp_data->mdp_plat_id;
	void *param = NULL;
	int index, ret;
	u32 num_comp = 0;

	if (CFG_CHECK(MT8183, p_id))
		num_comp = CFG_GET(MT8183, path->config, num_components);
	else if (CFG_CHECK(MT8195, p_id))
		num_comp = CFG_GET(MT8195, path->config, num_components);

	if (num_comp < 1)
		return -EINVAL;

	for (index = 0; index < num_comp; index++) {
		s32 inner_id = MDP_COMP_NONE;

		if (CFG_CHECK(MT8183, p_id))
			inner_id = CFG_GET(MT8183, path->config, components[index].type);
		else if (CFG_CHECK(MT8195, p_id))
			inner_id = CFG_GET(MT8195, path->config, components[index].type);

		if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
			continue;
		if (CFG_CHECK(MT8183, p_id))
			param = (void *)CFG_ADDR(MT8183, path->config, components[index]);
		else if (CFG_CHECK(MT8195, p_id))
			param = (void *)CFG_ADDR(MT8195, path->config, components[index]);
		ret = mdp_comp_ctx_config(mdp, &path->comps[index],
					  param, path->param);
		if (ret)
			return ret;
	}

	return 0;
}

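/*
 * Build the command sequence for sub-frame @count: acquire the MUTEX
 * modules, enable the routing (mux) registers, configure every
 * component, trigger the pipe, wait for completion, advance to the
 * next sub-frame, and finally clear the mux settings.
 */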
static int mdp_path_config_subfrm(struct mdp_cmdq_cmd *cmd,
				  struct mdp_path *path, u32 count)
{
	const int p_id = path->mdp_dev->mdp_data->mdp_plat_id;
	const struct img_mmsys_ctrl *ctrl = NULL;
	const struct img_mux *set;
	struct mdp_comp_ctx *ctx;
	struct mdp_pipe_info pipe;
	int index, ret;
	u32 num_comp = 0;
	s32 inner_id = MDP_COMP_NONE;

	if (CFG_CHECK(MT8183, p_id))
		num_comp = CFG_GET(MT8183, path->config, num_components);
	else if (CFG_CHECK(MT8195, p_id))
		num_comp = CFG_GET(MT8195, path->config, num_components);

	if (CFG_CHECK(MT8183, p_id))
		ctrl = CFG_ADDR(MT8183, path->config, ctrls[count]);
	else if (CFG_CHECK(MT8195, p_id))
		ctrl = CFG_ADDR(MT8195, path->config, ctrls[count]);

	/* Acquire components */
	ret = mdp_path_subfrm_require(path, cmd, &pipe, count);
	if (ret)
		return ret;
	/* Enable mux settings */
	for (index = 0; index < ctrl->num_sets; index++) {
		set = &ctrl->sets[index];
		cmdq_pkt_write_mask(&cmd->pkt, set->subsys_id, set->reg,
				    set->value, 0xFFFFFFFF);
	}
	/* Config sub-frame information */
	for (index = (num_comp - 1); index >= 0; index--) {
		if (CFG_CHECK(MT8183, p_id))
			inner_id = CFG_GET(MT8183, path->config, components[index].type);
		else if (CFG_CHECK(MT8195, p_id))
			inner_id = CFG_GET(MT8195, path->config, components[index].type);

		if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
			continue;
		ctx = &path->comps[index];
		if (is_output_disabled(p_id, ctx->param, count))
			continue;
		ret = call_op(ctx, config_subfrm, cmd, count);
		if (ret)
			return ret;
	}
	/* Run components */
	ret = mdp_path_subfrm_run(path, cmd, &pipe, count);
	if (ret)
		return ret;
	/* Wait components done */
	for (index = 0; index < num_comp; index++) {
		if (CFG_CHECK(MT8183, p_id))
			inner_id = CFG_GET(MT8183, path->config, components[index].type);
		else if (CFG_CHECK(MT8195, p_id))
			inner_id = CFG_GET(MT8195, path->config, components[index].type);

		if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
			continue;
		ctx = &path->comps[index];
		if (is_output_disabled(p_id, ctx->param, count))
			continue;
		ret = call_op(ctx, wait_comp_event, cmd);
		if (ret)
			return ret;
	}
	/* Advance to the next sub-frame */
	for (index = 0; index < num_comp; index++) {
		if (CFG_CHECK(MT8183, p_id))
			inner_id = CFG_GET(MT8183, path->config, components[index].type);
		else if (CFG_CHECK(MT8195, p_id))
			inner_id = CFG_GET(MT8195, path->config, components[index].type);

		if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
			continue;
		ctx = &path->comps[index];
		ret = call_op(ctx, advance_subfrm, cmd, count);
		if (ret)
			return ret;
	}
	/* Disable mux settings */
	for (index = 0; index < ctrl->num_sets; index++) {
		set = &ctrl->sets[index];
		cmdq_pkt_write_mask(&cmd->pkt, set->subsys_id, set->reg,
				    0, 0xFFFFFFFF);
	}

	return 0;
}

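/*
 * Build the full per-frame command sequence: initialize and configure
 * each component, emit every sub-frame, then append per-component
 * post-processing.
 */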
static int mdp_path_config(struct mdp_dev *mdp, struct mdp_cmdq_cmd *cmd,
			   struct mdp_path *path)
{
	const int p_id = mdp->mdp_data->mdp_plat_id;
	struct mdp_comp_ctx *ctx;
	int index, count, ret;
	u32 num_comp = 0;
	u32 num_sub = 0;
	s32 inner_id = MDP_COMP_NONE;

	if (CFG_CHECK(MT8183, p_id))
		num_comp = CFG_GET(MT8183, path->config, num_components);
	else if (CFG_CHECK(MT8195, p_id))
		num_comp = CFG_GET(MT8195, path->config, num_components);

	if (CFG_CHECK(MT8183, p_id))
		num_sub = CFG_GET(MT8183, path->config, num_subfrms);
	else if (CFG_CHECK(MT8195, p_id))
		num_sub = CFG_GET(MT8195, path->config, num_subfrms);

	/* Config path frame */
	/* Reset components */
	for (index = 0; index < num_comp; index++) {
		if (CFG_CHECK(MT8183, p_id))
			inner_id = CFG_GET(MT8183, path->config, components[index].type);
		else if (CFG_CHECK(MT8195, p_id))
			inner_id = CFG_GET(MT8195, path->config, components[index].type);

		if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
			continue;
		ctx = &path->comps[index];
		ret = call_op(ctx, init_comp, cmd);
		if (ret)
			return ret;
	}
	/* Config frame mode */
	for (index = 0; index < num_comp; index++) {
		const struct v4l2_rect *compose;
		u32 out = 0;

		ctx = &path->comps[index];
		if (CFG_CHECK(MT8183, p_id))
			inner_id = CFG_GET(MT8183, path->config, components[index].type);
		else if (CFG_CHECK(MT8195, p_id))
			inner_id = CFG_GET(MT8195, path->config, components[index].type);

		if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
			continue;

		if (CFG_CHECK(MT8183, p_id))
			out = CFG_COMP(MT8183, ctx->param, outputs[0]);
		else if (CFG_CHECK(MT8195, p_id))
			out = CFG_COMP(MT8195, ctx->param, outputs[0]);

		compose = path->composes[out];
		ret = call_op(ctx, config_frame, cmd, compose);
		if (ret)
			return ret;
	}

	/* Config path sub-frames */
	for (count = 0; count < num_sub; count++) {
		ret = mdp_path_config_subfrm(cmd, path, count);
		if (ret)
			return ret;
	}
	/* Post processing information */
	for (index = 0; index < num_comp; index++) {
		if (CFG_CHECK(MT8183, p_id))
			inner_id = CFG_GET(MT8183, path->config, components[index].type);
		else if (CFG_CHECK(MT8195, p_id))
			inner_id = CFG_GET(MT8195, path->config, components[index].type);

		if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
			continue;
		ctx = &path->comps[index];
		ret = call_op(ctx, post_process, cmd);
		if (ret)
			return ret;
	}
	return 0;
}

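/*
 * Allocate a CMDQ packet buffer of @size bytes and map it for the GCE
 * mailbox device. The mapping is DMA_TO_DEVICE; the CPU-written
 * commands are synced to the device in mdp_cmdq_send() before the
 * packet is submitted.
 */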
static int mdp_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
			       size_t size)
{
	struct device *dev;
	dma_addr_t dma_addr;

	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base)
		return -ENOMEM;

	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
		kfree(pkt->va_base);
		return -ENOMEM;
	}

	pkt->pa_base = dma_addr;

	return 0;
}

static void mdp_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	pkt->va_base = NULL;
}

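/*
 * Deferred completion: release the MUTEX clock, gate the component
 * clocks, finish the m2m job and invoke the user callback once the
 * last parallel pipe has completed, then free the command resources.
 */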
static void mdp_auto_release_work(struct work_struct *work)
{
	struct mdp_cmdq_cmd *cmd;
	struct mdp_dev *mdp;
	struct mtk_mutex *mutex;
	enum mdp_pipe_id pipe_id;

	cmd = container_of(work, struct mdp_cmdq_cmd, auto_release_work);
	mdp = cmd->mdp;

	pipe_id = __get_pipe(mdp, cmd->comps[0].public_id);
	mutex = __get_mutex(mdp, &mdp->mdp_data->pipe_info[pipe_id]);
	mtk_mutex_unprepare(mutex);
	mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
			    cmd->num_comps);

	if (refcount_dec_and_test(&mdp->job_count)) {
		if (cmd->mdp_ctx)
			mdp_m2m_job_finish(cmd->mdp_ctx);

		if (cmd->user_cmdq_cb) {
			struct cmdq_cb_data user_cb_data;

			user_cb_data.sta = cmd->data->sta;
			user_cb_data.pkt = cmd->data->pkt;
			cmd->user_cmdq_cb(user_cb_data);
		}
		wake_up(&mdp->callback_wq);
	}

	mdp_cmdq_pkt_destroy(&cmd->pkt);
	kfree(cmd->comps);
	cmd->comps = NULL;
	kfree(cmd);
	cmd = NULL;
}

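/*
 * GCE mailbox rx_callback. Runs in the mailbox callback context, so
 * the actual teardown is deferred to clock_wq; if queueing the work
 * fails, the resources are released inline instead.
 */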
static void mdp_handle_cmdq_callback(struct mbox_client *cl, void *mssg)
{
	struct mdp_cmdq_cmd *cmd;
	struct cmdq_cb_data *data;
	struct mdp_dev *mdp;
	struct device *dev;
	enum mdp_pipe_id pipe_id;

	if (!mssg) {
		pr_info("%s:no callback data\n", __func__);
		return;
	}

	data = (struct cmdq_cb_data *)mssg;
	cmd = container_of(data->pkt, struct mdp_cmdq_cmd, pkt);
	cmd->data = data;
	mdp = cmd->mdp;
	dev = &mdp->pdev->dev;

	INIT_WORK(&cmd->auto_release_work, mdp_auto_release_work);
	if (!queue_work(mdp->clock_wq, &cmd->auto_release_work)) {
		struct mtk_mutex *mutex;

		dev_err(dev, "%s:queue_work fail!\n", __func__);
		pipe_id = __get_pipe(mdp, cmd->comps[0].public_id);
		mutex = __get_mutex(mdp, &mdp->mdp_data->pipe_info[pipe_id]);
		mtk_mutex_unprepare(mutex);
		mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
				    cmd->num_comps);

		if (refcount_dec_and_test(&mdp->job_count))
			wake_up(&mdp->callback_wq);

		mdp_cmdq_pkt_destroy(&cmd->pkt);
		kfree(cmd->comps);
		cmd->comps = NULL;
		kfree(cmd);
		cmd = NULL;
	}
}

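/*
 * Translate the frame configuration of parallel pipe @pp_idx into a
 * finalized CMDQ packet. On success the returned command owns the
 * packet, a snapshot of the involved components, and a prepared
 * MUTEX; all of these are released from the completion path.
 */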
static struct mdp_cmdq_cmd *mdp_cmdq_prepare(struct mdp_dev *mdp,
					     struct mdp_cmdq_param *param,
					     u8 pp_idx)
{
	struct mdp_path *path = NULL;
	struct mdp_cmdq_cmd *cmd = NULL;
	struct mdp_comp *comps = NULL;
	struct device *dev = &mdp->pdev->dev;
	const int p_id = mdp->mdp_data->mdp_plat_id;
	struct img_config *config;
	struct mtk_mutex *mutex = NULL;
	enum mdp_pipe_id pipe_id;
	int i, ret = -ECANCELED;
	u32 num_comp;

	config = __get_config_offset(mdp, param, pp_idx);
	if (IS_ERR(config)) {
		ret = PTR_ERR(config);
		goto err_uninit;
	}

	if (CFG_CHECK(MT8183, p_id))
		num_comp = CFG_GET(MT8183, config, num_components);
	else if (CFG_CHECK(MT8195, p_id))
		num_comp = CFG_GET(MT8195, config, num_components);
	else
		goto err_uninit;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd) {
		ret = -ENOMEM;
		goto err_uninit;
	}

	ret = mdp_cmdq_pkt_create(mdp->cmdq_clt[pp_idx], &cmd->pkt, SZ_16K);
	if (ret)
		goto err_free_cmd;

	if (CFG_CHECK(MT8183, p_id)) {
		num_comp = CFG_GET(MT8183, param->config, num_components);
	} else if (CFG_CHECK(MT8195, p_id)) {
		num_comp = CFG_GET(MT8195, param->config, num_components);
	} else {
		ret = -EINVAL;
		goto err_destroy_pkt;
	}
	comps = kcalloc(num_comp, sizeof(*comps), GFP_KERNEL);
	if (!comps) {
		ret = -ENOMEM;
		goto err_destroy_pkt;
	}

	path = kzalloc(sizeof(*path), GFP_KERNEL);
	if (!path) {
		ret = -ENOMEM;
		goto err_free_comps;
	}

	path->mdp_dev = mdp;
	path->config = config;
	path->param = param->param;
	for (i = 0; i < param->param->num_outputs; i++) {
		path->bounds[i].left = 0;
		path->bounds[i].top = 0;
		path->bounds[i].width =
			param->param->outputs[i].buffer.format.width;
		path->bounds[i].height =
			param->param->outputs[i].buffer.format.height;
		path->composes[i] = param->composes[i] ?
			param->composes[i] : &path->bounds[i];
	}
	ret = mdp_path_ctx_init(mdp, path);
	if (ret) {
		dev_err(dev, "mdp_path_ctx_init error %d\n", pp_idx);
		goto err_free_path;
	}

	pipe_id = __get_pipe(mdp, path->comps[0].comp->public_id);
	mutex = __get_mutex(mdp, &mdp->mdp_data->pipe_info[pipe_id]);
	ret = mtk_mutex_prepare(mutex);
	if (ret) {
		dev_err(dev, "Fail to enable mutex %d clk\n", pp_idx);
		goto err_free_path;
	}

	ret = mdp_path_config(mdp, cmd, path);
	if (ret) {
		dev_err(dev, "mdp_path_config error %d\n", pp_idx);
		goto err_free_path;
	}
	cmdq_pkt_finalize(&cmd->pkt);

	for (i = 0; i < num_comp; i++) {
		s32 inner_id = MDP_COMP_NONE;

		if (CFG_CHECK(MT8183, p_id))
			inner_id = CFG_GET(MT8183, path->config, components[i].type);
		else if (CFG_CHECK(MT8195, p_id))
			inner_id = CFG_GET(MT8195, path->config, components[i].type);

		if (mdp_cfg_comp_is_dummy(mdp, inner_id))
			continue;
		memcpy(&comps[i], path->comps[i].comp,
		       sizeof(struct mdp_comp));
	}

	mdp->cmdq_clt[pp_idx]->client.rx_callback = mdp_handle_cmdq_callback;
	cmd->mdp = mdp;
	cmd->user_cmdq_cb = param->cmdq_cb;
	cmd->user_cb_data = param->cb_data;
	cmd->comps = comps;
	cmd->num_comps = num_comp;
	cmd->mdp_ctx = param->mdp_ctx;

	kfree(path);
	return cmd;

err_free_path:
	if (mutex)
		mtk_mutex_unprepare(mutex);
	kfree(path);
err_free_comps:
	kfree(comps);
err_destroy_pkt:
	mdp_cmdq_pkt_destroy(&cmd->pkt);
err_free_cmd:
	kfree(cmd);
err_uninit:
	return ERR_PTR(ret);
}

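/**
 * mdp_cmdq_send - build and submit the CMDQ packet(s) for one request
 * @mdp:   MDP driver instance
 * @param: frame parameters, config buffer and completion callback
 *
 * One packet is prepared per parallel pipe. Component clocks are
 * enabled before the packets are handed to the GCE mailbox; on any
 * failure the job count is reset and already-enabled clocks are
 * switched off again.
 */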
int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
{
	struct mdp_cmdq_cmd *cmd[MDP_PP_MAX] = {NULL};
	struct device *dev = &mdp->pdev->dev;
	int i, ret;
	u8 pp_used = __get_pp_num(param->param->type);

	refcount_set(&mdp->job_count, pp_used);
	if (atomic_read(&mdp->suspended)) {
		refcount_set(&mdp->job_count, 0);
		return -ECANCELED;
	}

	for (i = 0; i < pp_used; i++) {
		cmd[i] = mdp_cmdq_prepare(mdp, param, i);
		if (IS_ERR_OR_NULL(cmd[i])) {
			ret = PTR_ERR(cmd[i]);
			goto err_cancel_job;
		}
	}

	for (i = 0; i < pp_used; i++) {
		ret = mdp_comp_clocks_on(&mdp->pdev->dev, cmd[i]->comps, cmd[i]->num_comps);
		if (ret)
			goto err_clock_off;
	}

	for (i = 0; i < pp_used; i++) {
		dma_sync_single_for_device(mdp->cmdq_clt[i]->chan->mbox->dev,
					   cmd[i]->pkt.pa_base, cmd[i]->pkt.cmd_buf_size,
					   DMA_TO_DEVICE);

		ret = mbox_send_message(mdp->cmdq_clt[i]->chan, &cmd[i]->pkt);
		if (ret < 0) {
			dev_err(dev, "mbox send message fail %d!\n", ret);
			i = pp_used;
			goto err_clock_off;
		}
		mbox_client_txdone(mdp->cmdq_clt[i]->chan, 0);
	}
	return 0;

err_clock_off:
	while (--i >= 0)
		mdp_comp_clocks_off(&mdp->pdev->dev, cmd[i]->comps,
				    cmd[i]->num_comps);
err_cancel_job:
	refcount_set(&mdp->job_count, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(mdp_cmdq_send);