// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>

#include <asm/barrier.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "mtk_crtc.h"
#include "mtk_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_plane.h"

/*
 * struct mtk_crtc - MediaTek specific crtc structure.
 * @base: crtc object.
 * @enabled: records whether crtc_enable succeeded
 * @planes: array of 4 drm_plane structures, one for each overlay plane
 * @pending_planes: whether any plane has pending changes to be applied
 * @mmsys_dev: pointer to the mmsys device for configuration registers
 * @mutex: handle to one of the ten disp_mutex streams
 * @ddp_comp_nr: number of components in ddp_comp
 * @ddp_comp: array of pointers to the mtk_ddp_comp structures used by this crtc
 *
 * TODO: Needs update: this header is missing a bunch of member descriptions.
 */
struct mtk_crtc {
	struct drm_crtc			base;
	bool				enabled;

	bool				pending_needs_vblank;
	struct drm_pending_vblank_event	*event;

	struct drm_plane		*planes;
	unsigned int			layer_nr;
	bool				pending_planes;
	bool				pending_async_planes;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_client		cmdq_client;
	struct cmdq_pkt			cmdq_handle;
	u32				cmdq_event;
	u32				cmdq_vblank_cnt;
	wait_queue_head_t		cb_blocking_queue;
#endif

	struct device			*mmsys_dev;
	struct device			*dma_dev;
	struct mtk_mutex		*mutex;
	unsigned int			ddp_comp_nr;
	struct mtk_ddp_comp		**ddp_comp;
	unsigned int			num_conn_routes;
	const struct mtk_drm_route	*conn_routes;

	/* lock for display hardware access */
	struct mutex			hw_lock;
	bool				config_updating;
	/* lock for config_updating to cmd buffer */
	spinlock_t			config_lock;
};

struct mtk_crtc_state {
	struct drm_crtc_state		base;

	bool				pending_config;
	unsigned int			pending_width;
	unsigned int			pending_height;
	unsigned int			pending_vrefresh;
};

static inline struct mtk_crtc *to_mtk_crtc(struct drm_crtc *c)
{
	return container_of(c, struct mtk_crtc, base);
}

static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s)
{
	return container_of(s, struct mtk_crtc_state, base);
}

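/*
 * Complete a pending page flip by delivering the vblank event stashed in
 * mtk_crtc->event and dropping the vblank reference taken when the event
 * was queued in atomic_begin.  The event is sent under dev->event_lock.
 */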
static void mtk_crtc_finish_page_flip(struct mtk_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	unsigned long flags;

	if (mtk_crtc->event) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
		drm_crtc_vblank_put(crtc);
		mtk_crtc->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}
}

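/*
 * Per-vblank housekeeping: forward the vblank to the DRM core and, when
 * registers are written directly by the CPU (no CMDQ mailbox channel),
 * finish the page flip here.  With CMDQ the flip completes in
 * ddp_cmdq_cb() once the command packet has executed.
 */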
static void mtk_drm_finish_page_flip(struct mtk_crtc *mtk_crtc)
{
	unsigned long flags;

	drm_crtc_handle_vblank(&mtk_crtc->base);

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (mtk_crtc->cmdq_client.chan)
		return;
#endif

	spin_lock_irqsave(&mtk_crtc->config_lock, flags);
	if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
		mtk_crtc_finish_page_flip(mtk_crtc);
		mtk_crtc->pending_needs_vblank = false;
	}
	spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
}

static void mtk_crtc_destroy(struct drm_crtc *crtc)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	int i;

	mtk_mutex_put(mtk_crtc->mutex);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (mtk_crtc->cmdq_client.chan) {
		cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle);
		mbox_free_channel(mtk_crtc->cmdq_client.chan);
		mtk_crtc->cmdq_client.chan = NULL;
	}
#endif

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		struct mtk_ddp_comp *comp;

		comp = mtk_crtc->ddp_comp[i];
		mtk_ddp_comp_unregister_vblank_cb(comp);
	}

	drm_crtc_cleanup(crtc);
}

static void mtk_crtc_reset(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state;

	if (crtc->state)
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

	kfree(to_mtk_crtc_state(crtc->state));
	crtc->state = NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

static struct drm_crtc_state *mtk_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	WARN_ON(state->base.crtc != crtc);
	state->base.crtc = crtc;
	state->pending_config = false;

	return &state->base;
}

static void mtk_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(to_mtk_crtc_state(state));
}

static enum drm_mode_status
mtk_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	enum drm_mode_status status = MODE_OK;
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		status = mtk_ddp_comp_mode_valid(mtk_crtc->ddp_comp[i], mode);
		if (status != MODE_OK)
			break;
	}
	return status;
}

static bool mtk_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	/* Nothing to do here, but this callback is mandatory. */
	return true;
}

static void mtk_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);

	state->pending_width = crtc->mode.hdisplay;
	state->pending_height = crtc->mode.vdisplay;
	state->pending_vrefresh = drm_mode_vrefresh(&crtc->mode);
	wmb();	/* Make sure the above parameters are set before update */
	state->pending_config = true;
}

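/*
 * Enable the clocks of every DDP component on this CRTC's path.  On
 * failure, the clocks that were already enabled are switched off again
 * in reverse order before the error is returned.
 */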
static int mtk_crtc_ddp_clk_enable(struct mtk_crtc *mtk_crtc)
{
	struct drm_device *dev = mtk_crtc->base.dev;
	int ret;
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		ret = mtk_ddp_comp_clk_enable(mtk_crtc->ddp_comp[i]);
		if (ret) {
			drm_err(dev, "Failed to enable clock %d: %d\n", i, ret);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= 0)
		mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
	return ret;
}

static void mtk_crtc_ddp_clk_disable(struct mtk_crtc *mtk_crtc)
{
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
}

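/*
 * Map a CRTC-global plane to the DDP component that scans it out.  Plane
 * indices are assigned across the path in component order, so walk the
 * components, accumulating their layer counts, until the index falls
 * inside one; *local_layer then holds the layer number within that
 * component.
 */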
static
struct mtk_ddp_comp *mtk_ddp_comp_for_plane(struct drm_crtc *crtc,
					    struct drm_plane *plane,
					    unsigned int *local_layer)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp;
	int i, count = 0;
	unsigned int local_index = plane - mtk_crtc->planes;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		comp = mtk_crtc->ddp_comp[i];
		if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) {
			*local_layer = local_index - count;
			return comp;
		}
		count += mtk_ddp_comp_layer_nr(comp);
	}

	WARN(1, "Failed to find component for plane %d\n", plane->index);
	return NULL;
}

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
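/*
 * Mailbox rx_callback, invoked when a CMDQ packet sent from
 * mtk_crtc_update_config() has finished executing.  It releases the GCE
 * runtime PM reference and, unless a new configuration is already being
 * staged, clears the pending flags and completes the page flip.
 */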
static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
{
	struct cmdq_cb_data *data = mssg;
	struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client);
	struct mtk_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_crtc, cmdq_client);
	struct mtk_crtc_state *state;
	unsigned int i;
	unsigned long flags;

	/* release GCE HW usage and start autosuspend */
	pm_runtime_mark_last_busy(cmdq_cl->chan->mbox->dev);
	pm_runtime_put_autosuspend(cmdq_cl->chan->mbox->dev);

	if (data->sta < 0)
		return;

	state = to_mtk_crtc_state(mtk_crtc->base.state);

	spin_lock_irqsave(&mtk_crtc->config_lock, flags);
	if (mtk_crtc->config_updating)
		goto ddp_cmdq_cb_out;

	state->pending_config = false;

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			plane_state->pending.config = false;
		}
		mtk_crtc->pending_planes = false;
	}

	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			plane_state->pending.async_config = false;
		}
		mtk_crtc->pending_async_planes = false;
	}

ddp_cmdq_cb_out:

	if (mtk_crtc->pending_needs_vblank) {
		mtk_crtc_finish_page_flip(mtk_crtc);
		mtk_crtc->pending_needs_vblank = false;
	}

	spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);

	mtk_crtc->cmdq_vblank_cnt = 0;
	wake_up(&mtk_crtc->cb_blocking_queue);
}
#endif

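/*
 * Bring the display path up: resume the power domain, prepare the mutex
 * and component clocks, connect the components back to back through
 * mmsys, attach them to the disp_mutex stream, then configure and start
 * each component.  All planes start out disabled; they are enabled later
 * from the atomic commit path.
 */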
static int mtk_crtc_ddp_hw_init(struct mtk_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_connector_list_iter conn_iter;
	struct drm_device *dev = mtk_crtc->base.dev;
	unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC;
	int ret;
	int i;

	if (WARN_ON(!crtc->state))
		return -EINVAL;

	width = crtc->state->adjusted_mode.hdisplay;
	height = crtc->state->adjusted_mode.vdisplay;
	vrefresh = drm_mode_vrefresh(&crtc->state->adjusted_mode);

	drm_for_each_encoder(encoder, crtc->dev) {
		if (encoder->crtc != crtc)
			continue;

		drm_connector_list_iter_begin(crtc->dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (connector->encoder != encoder)
				continue;
			if (connector->display_info.bpc != 0 &&
			    bpc > connector->display_info.bpc)
				bpc = connector->display_info.bpc;
		}
		drm_connector_list_iter_end(&conn_iter);
	}

	ret = pm_runtime_resume_and_get(crtc->dev->dev);
	if (ret < 0) {
		drm_err(dev, "Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = mtk_mutex_prepare(mtk_crtc->mutex);
	if (ret < 0) {
		drm_err(dev, "Failed to enable mutex clock: %d\n", ret);
		goto err_pm_runtime_put;
	}

	ret = mtk_crtc_ddp_clk_enable(mtk_crtc);
	if (ret < 0) {
		drm_err(dev, "Failed to enable component clocks: %d\n", ret);
		goto err_mutex_unprepare;
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		if (!mtk_ddp_comp_connect(mtk_crtc->ddp_comp[i], mtk_crtc->mmsys_dev,
					  mtk_crtc->ddp_comp[i + 1]->id))
			mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
					      mtk_crtc->ddp_comp[i]->id,
					      mtk_crtc->ddp_comp[i + 1]->id);
		if (!mtk_ddp_comp_add(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
			mtk_mutex_add_comp(mtk_crtc->mutex,
					   mtk_crtc->ddp_comp[i]->id);
	}
	if (!mtk_ddp_comp_add(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
		mtk_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_enable(mtk_crtc->mutex);

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];

		if (i == 1)
			mtk_ddp_comp_bgclr_in_on(comp);

		mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL);
		mtk_ddp_comp_start(comp);
	}

	/* Initially configure all planes */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;
		struct mtk_ddp_comp *comp;
		unsigned int local_layer;

		plane_state = to_mtk_plane_state(plane->state);

		/* should not enable layer before crtc enabled */
		plane_state->pending.enable = false;
		comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer);
		if (comp)
			mtk_ddp_comp_layer_config(comp, local_layer,
						  plane_state, NULL);
	}

	return 0;

err_mutex_unprepare:
	mtk_mutex_unprepare(mtk_crtc->mutex);
err_pm_runtime_put:
	pm_runtime_put(crtc->dev->dev);
	return ret;
}

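/*
 * Tear the display path down in roughly the reverse order of
 * mtk_crtc_ddp_hw_init(): stop the components, detach them from the
 * disp_mutex stream, break the mmsys connections, release clocks and the
 * power domain, and deliver any vblank event left over from a commit
 * that disabled the CRTC.
 */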
static void mtk_crtc_ddp_hw_fini(struct mtk_crtc *mtk_crtc)
{
	struct drm_device *drm = mtk_crtc->base.dev;
	struct drm_crtc *crtc = &mtk_crtc->base;
	unsigned long flags;
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
		if (i == 1)
			mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]);
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
			mtk_mutex_remove_comp(mtk_crtc->mutex,
					      mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_disable(mtk_crtc->mutex);
	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		if (!mtk_ddp_comp_disconnect(mtk_crtc->ddp_comp[i], mtk_crtc->mmsys_dev,
					     mtk_crtc->ddp_comp[i + 1]->id))
			mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev,
						 mtk_crtc->ddp_comp[i]->id,
						 mtk_crtc->ddp_comp[i + 1]->id);
		if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
			mtk_mutex_remove_comp(mtk_crtc->mutex,
					      mtk_crtc->ddp_comp[i]->id);
	}
	if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
		mtk_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_crtc_ddp_clk_disable(mtk_crtc);
	mtk_mutex_unprepare(mtk_crtc->mutex);

	pm_runtime_put(drm->dev);

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}
}

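/*
 * Write the pending CRTC and plane configuration to the hardware.  When
 * @cmdq_handle is NULL the registers are programmed directly and the
 * pending flags are cleared here; otherwise the writes are recorded into
 * the CMDQ packet and the flags are cleared in ddp_cmdq_cb() after the
 * packet has executed.
 */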
static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
				struct cmdq_pkt *cmdq_handle)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	unsigned int i;
	unsigned int local_layer;

	/*
	 * TODO: instead of updating the registers here, we should prepare
	 * working registers in atomic_commit and let the hardware command
	 * queue update module registers on vblank.
	 */
	if (state->pending_config) {
		mtk_ddp_comp_config(comp, state->pending_width,
				    state->pending_height,
				    state->pending_vrefresh, 0,
				    cmdq_handle);

		if (!cmdq_handle)
			state->pending_config = false;
	}

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.config)
				continue;

			comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			if (!cmdq_handle)
				plane_state->pending.config = false;
		}

		if (!cmdq_handle)
			mtk_crtc->pending_planes = false;
	}

	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.async_config)
				continue;

			comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			if (!cmdq_handle)
				plane_state->pending.async_config = false;
		}

		if (!cmdq_handle)
			mtk_crtc->pending_async_planes = false;
	}
}

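/*
 * Flush the staged state to the hardware.  Three paths exist: shadow
 * registers (applied under the disp_mutex and latched on vblank), a CMDQ
 * packet executed by the GCE on the next vblank event, or direct CPU
 * register writes from the vblank interrupt.  config_updating fences the
 * callback/irq paths while the new state is staged.
 */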
static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
#endif
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	unsigned int pending_planes = 0, pending_async_planes = 0;
	int i;
	unsigned long flags;

	mutex_lock(&mtk_crtc->hw_lock);

	spin_lock_irqsave(&mtk_crtc->config_lock, flags);
	mtk_crtc->config_updating = true;
	spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);

	if (needs_vblank)
		mtk_crtc->pending_needs_vblank = true;

	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		if (plane_state->pending.dirty) {
			plane_state->pending.config = true;
			plane_state->pending.dirty = false;
			pending_planes |= BIT(i);
		} else if (plane_state->pending.async_dirty) {
			plane_state->pending.async_config = true;
			plane_state->pending.async_dirty = false;
			pending_async_planes |= BIT(i);
		}
	}
	if (pending_planes)
		mtk_crtc->pending_planes = true;
	if (pending_async_planes)
		mtk_crtc->pending_async_planes = true;

	if (priv->data->shadow_register) {
		mtk_mutex_acquire(mtk_crtc->mutex);
		mtk_crtc_ddp_config(crtc, NULL);
		mtk_mutex_release(mtk_crtc->mutex);
	}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (mtk_crtc->cmdq_client.chan) {
		mbox_flush(mtk_crtc->cmdq_client.chan, 2000);
		cmdq_handle->cmd_buf_size = 0;
		cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
		mtk_crtc_ddp_config(crtc, cmdq_handle);
		cmdq_pkt_eoc(cmdq_handle);
		dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev,
					   cmdq_handle->pa_base,
					   cmdq_handle->cmd_buf_size,
					   DMA_TO_DEVICE);
		/*
		 * The CMDQ packet should execute within the next three
		 * vblanks: one vblank interrupt may fire before the message
		 * is sent (occasionally) and one fires after CMDQ completes,
		 * so declare a timeout once three vblank interrupts have
		 * passed without the callback running.
		 */
		mtk_crtc->cmdq_vblank_cnt = 3;

		spin_lock_irqsave(&mtk_crtc->config_lock, flags);
		mtk_crtc->config_updating = false;
		spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);

		if (pm_runtime_resume_and_get(mtk_crtc->cmdq_client.chan->mbox->dev) < 0)
			goto update_config_out;

		mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
		mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
		goto update_config_out;
	}
#endif
	spin_lock_irqsave(&mtk_crtc->config_lock, flags);
	mtk_crtc->config_updating = false;
	spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
update_config_out:
#endif
	mutex_unlock(&mtk_crtc->hw_lock);
}

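/*
 * Vblank callback registered for each DDP component on the path.
 * Without shadow registers or CMDQ, this is where the pending
 * configuration is written to the hardware; with CMDQ it only ticks
 * down the watchdog counter used to detect packets that never executed.
 */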
static void mtk_crtc_ddp_irq(void *data)
{
	struct drm_crtc *crtc = data;
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_private *priv = crtc->dev->dev_private;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct drm_device *dev = mtk_crtc->base.dev;

	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan)
		mtk_crtc_ddp_config(crtc, NULL);
	else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
		drm_err(dev, "mtk_crtc %d CMDQ execute command timeout!\n",
			drm_crtc_index(&mtk_crtc->base));
#else
	if (!priv->data->shadow_register)
		mtk_crtc_ddp_config(crtc, NULL);
#endif
	mtk_drm_finish_page_flip(mtk_crtc);
}

static int mtk_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];

	mtk_ddp_comp_enable_vblank(comp);

	return 0;
}

static void mtk_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];

	mtk_ddp_comp_disable_vblank(comp);
}

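/*
 * For CRTCs with connector routes, pick the route whose encoder is part
 * of the new state's encoder_mask and install its DDP component as the
 * last entry of the path, so the stream is output to the connector that
 * is actually in use.
 */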
static void mtk_crtc_update_output(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	int crtc_index = drm_crtc_index(crtc);
	int i;
	struct device *dev;
	struct drm_crtc_state *crtc_state = state->crtcs[crtc_index].new_state;
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_private *priv;
	unsigned int encoder_mask = crtc_state->encoder_mask;

	if (!crtc_state->connectors_changed)
		return;

	if (!mtk_crtc->num_conn_routes)
		return;

	priv = ((struct mtk_drm_private *)crtc->dev->dev_private)->all_drm_private[crtc_index];
	dev = priv->dev;

	dev_dbg(dev, "connector change:%d, encoder mask:0x%x for crtc:%d\n",
		crtc_state->connectors_changed, encoder_mask, crtc_index);

	for (i = 0; i < mtk_crtc->num_conn_routes; i++) {
		unsigned int comp_id = mtk_crtc->conn_routes[i].route_ddp;
		struct mtk_ddp_comp *comp = &priv->ddp_comp[comp_id];

		if (comp->encoder_index >= 0 &&
		    (encoder_mask & BIT(comp->encoder_index))) {
			mtk_crtc->ddp_comp[mtk_crtc->ddp_comp_nr - 1] = comp;
			dev_dbg(dev, "Add comp_id: %d at path index %d\n",
				comp->id, mtk_crtc->ddp_comp_nr - 1);
			break;
		}
	}
}

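/*
 * Ask the component that owns this plane to validate the new plane
 * state.  Returns 0 when no component claims the plane.
 */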
int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
			 struct mtk_plane_state *state)
{
	unsigned int local_layer;
	struct mtk_ddp_comp *comp;

	comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer);
	if (comp)
		return mtk_ddp_comp_layer_check(comp, local_layer, state);
	return 0;
}

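/*
 * Disable one plane through CMDQ: copy the (already disabled) plane
 * state into the CRTC's copy of that plane, flush it, and block until
 * the packet has executed so the caller can safely release the plane's
 * buffer.  Nothing to wait for when registers are written by the CPU.
 */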
void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_plane_state *plane_state = to_mtk_plane_state(plane->state);
	int i;

	/* no need to wait for disabling the plane by CPU */
	if (!mtk_crtc->cmdq_client.chan)
		return;

	if (!mtk_crtc->enabled)
		return;

	/* set pending plane state to disabled */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *mtk_plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(mtk_plane->state);

		if (mtk_plane->index == plane->index) {
			memcpy(mtk_plane_state, plane_state, sizeof(*plane_state));
			break;
		}
	}
	mtk_crtc_update_config(mtk_crtc, false);

	/* wait for planes to be disabled by CMDQ */
	wait_event_timeout(mtk_crtc->cb_blocking_queue,
			   mtk_crtc->cmdq_vblank_cnt == 0,
			   msecs_to_jiffies(500));
#endif
}

void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
			   struct drm_atomic_state *state)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);

	if (!mtk_crtc->enabled)
		return;

	mtk_crtc_update_config(mtk_crtc, false);
}

static void mtk_crtc_atomic_enable(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	struct drm_device *dev = mtk_crtc->base.dev;
	int ret;

	drm_dbg_driver(dev, "%s %d\n", __func__, crtc->base.id);

	ret = mtk_ddp_comp_power_on(comp);
	if (ret < 0) {
		DRM_DEV_ERROR(comp->dev, "Failed to enable power domain: %d\n", ret);
		return;
	}

	mtk_crtc_update_output(crtc, state);

	ret = mtk_crtc_ddp_hw_init(mtk_crtc);
	if (ret) {
		mtk_ddp_comp_power_off(comp);
		return;
	}

	drm_crtc_vblank_on(crtc);
	mtk_crtc->enabled = true;
}

static void mtk_crtc_atomic_disable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	struct drm_device *dev = mtk_crtc->base.dev;
	int i;

	drm_dbg_driver(dev, "%s %d\n", __func__, crtc->base.id);
	if (!mtk_crtc->enabled)
		return;

	/* Set all pending plane state to disabled */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		plane_state->pending.enable = false;
		plane_state->pending.config = true;
	}
	mtk_crtc->pending_planes = true;

	mtk_crtc_update_config(mtk_crtc, false);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	/* Wait for planes to be disabled by cmdq */
	if (mtk_crtc->cmdq_client.chan)
		wait_event_timeout(mtk_crtc->cb_blocking_queue,
				   mtk_crtc->cmdq_vblank_cnt == 0,
				   msecs_to_jiffies(500));
#endif
	/* Wait for planes to be disabled */
	drm_crtc_wait_one_vblank(crtc);

	drm_crtc_vblank_off(crtc);
	mtk_crtc_ddp_hw_fini(mtk_crtc);
	mtk_ddp_comp_power_off(comp);

	mtk_crtc->enabled = false;
}

static void mtk_crtc_atomic_begin(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct drm_device *dev = mtk_crtc->base.dev;
	unsigned long flags;

	if (mtk_crtc->event && mtk_crtc_state->base.event)
		drm_err(dev, "new event while there is still a pending event\n");

	if (mtk_crtc_state->base.event) {
		mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc);
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		mtk_crtc->event = mtk_crtc_state->base.event;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

		mtk_crtc_state->base.event = NULL;
	}
}

static void mtk_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	int i;

	if (crtc->state->color_mgmt_changed)
		for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
			mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
			mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state);
		}
	mtk_crtc_update_config(mtk_crtc, !!mtk_crtc->event);
}

static const struct drm_crtc_funcs mtk_crtc_funcs = {
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.destroy		= mtk_crtc_destroy,
	.reset			= mtk_crtc_reset,
	.atomic_duplicate_state	= mtk_crtc_duplicate_state,
	.atomic_destroy_state	= mtk_crtc_destroy_state,
	.enable_vblank		= mtk_crtc_enable_vblank,
	.disable_vblank		= mtk_crtc_disable_vblank,
};

static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
	.mode_fixup	= mtk_crtc_mode_fixup,
	.mode_set_nofb	= mtk_crtc_mode_set_nofb,
	.mode_valid	= mtk_crtc_mode_valid,
	.atomic_begin	= mtk_crtc_atomic_begin,
	.atomic_flush	= mtk_crtc_atomic_flush,
	.atomic_enable	= mtk_crtc_atomic_enable,
	.atomic_disable	= mtk_crtc_atomic_disable,
};

static int mtk_crtc_init(struct drm_device *drm, struct mtk_crtc *mtk_crtc,
			 unsigned int pipe)
{
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
			primary = &mtk_crtc->planes[i];
		else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
			cursor = &mtk_crtc->planes[i];
	}

	ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
					&mtk_crtc_funcs, NULL);
	if (ret)
		goto err_cleanup_crtc;

	drm_crtc_helper_add(&mtk_crtc->base, &mtk_crtc_helper_funcs);

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(&mtk_crtc->base);
	return ret;
}

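/*
 * Only the first two components of a path may contribute planes: the
 * first one always does, the second only when it can blend into the
 * first (i.e. implements bgclr_in_on).  All later components report
 * zero planes.
 */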
static int mtk_crtc_num_comp_planes(struct mtk_crtc *mtk_crtc, int comp_idx)
{
	struct mtk_ddp_comp *comp;

	if (comp_idx > 1)
		return 0;

	comp = mtk_crtc->ddp_comp[comp_idx];
	if (!comp->funcs)
		return 0;

	if (comp_idx == 1 && !comp->funcs->bgclr_in_on)
		return 0;

	return mtk_ddp_comp_layer_nr(comp);
}

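/*
 * Plane types follow the CRTC-global plane index: the first plane is the
 * primary, the last one the cursor, everything in between an overlay.
 */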
static inline
enum drm_plane_type mtk_crtc_plane_type(unsigned int plane_idx,
					unsigned int num_planes)
{
	if (plane_idx == 0)
		return DRM_PLANE_TYPE_PRIMARY;
	else if (plane_idx == (num_planes - 1))
		return DRM_PLANE_TYPE_CURSOR;
	else
		return DRM_PLANE_TYPE_OVERLAY;
}

static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev,
				     struct mtk_crtc *mtk_crtc,
				     int comp_idx, int pipe)
{
	int num_planes = mtk_crtc_num_comp_planes(mtk_crtc, comp_idx);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx];
	int i, ret;

	for (i = 0; i < num_planes; i++) {
		ret = mtk_plane_init(drm_dev,
				&mtk_crtc->planes[mtk_crtc->layer_nr],
				BIT(pipe),
				mtk_crtc_plane_type(mtk_crtc->layer_nr, num_planes),
				mtk_ddp_comp_supported_rotations(comp),
				mtk_ddp_comp_get_blend_modes(comp),
				mtk_ddp_comp_get_formats(comp),
				mtk_ddp_comp_get_num_formats(comp),
				mtk_ddp_comp_is_afbc_supported(comp), i);
		if (ret)
			return ret;

		mtk_crtc->layer_nr++;
	}
	return 0;
}

struct device *mtk_crtc_dma_dev_get(struct drm_crtc *crtc)
{
	struct mtk_crtc *mtk_crtc = NULL;

	if (!crtc)
		return NULL;

	mtk_crtc = to_mtk_crtc(crtc);
	if (!mtk_crtc)
		return NULL;

	return mtk_crtc->dma_dev;
}

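/*
 * Create a CRTC for the display path described by @path, an array of
 * @path_len DDP component IDs ordered from scanout source to sink, and
 * register its planes.  The optional @conn_routes array describes
 * selectable output components resolved at enable time.  Returns 0 on
 * success (or when @path is NULL) and a negative errno otherwise.
 *
 * A minimal sketch of a caller, assuming a hypothetical two-component
 * path (the component IDs below are illustrative, not a validated
 * pipeline for any particular SoC):
 *
 *	static const unsigned int path[] = {
 *		DDP_COMPONENT_OVL0, DDP_COMPONENT_DSI0,
 *	};
 *
 *	ret = mtk_crtc_create(drm_dev, path, ARRAY_SIZE(path), 0, NULL, 0);
 */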
int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path,
		    unsigned int path_len, int priv_data_index,
		    const struct mtk_drm_route *conn_routes,
		    unsigned int num_conn_routes)
{
	struct mtk_drm_private *priv = drm_dev->dev_private;
	struct device *dev = drm_dev->dev;
	struct mtk_crtc *mtk_crtc;
	unsigned int num_comp_planes = 0;
	int ret;
	int i;
	bool has_ctm = false;
	unsigned int gamma_lut_size = 0;
	struct drm_crtc *tmp;
	int crtc_i = 0;

	if (!path)
		return 0;

	priv = priv->all_drm_private[priv_data_index];

	drm_for_each_crtc(tmp, drm_dev)
		crtc_i++;

	for (i = 0; i < path_len; i++) {
		enum mtk_ddp_comp_id comp_id = path[i];
		struct device_node *node;
		struct mtk_ddp_comp *comp;

		node = priv->comp_node[comp_id];
		comp = &priv->ddp_comp[comp_id];

		/*
		 * Not all DRM components have a DT device node, e.g. the
		 * ovl_adaptor, which is a sub-driver instantiated by the
		 * DRM driver itself.
		 */
		if (!node && comp_id != DDP_COMPONENT_DRM_OVL_ADAPTOR) {
			dev_info(dev,
				"Not creating crtc %d because component %d is disabled or missing\n",
				crtc_i, comp_id);
			return 0;
		}

		if (!comp->dev) {
			dev_err(dev, "Component %pOF not initialized\n", node);
			return -ENODEV;
		}
	}

	mtk_crtc = devm_kzalloc(dev, sizeof(*mtk_crtc), GFP_KERNEL);
	if (!mtk_crtc)
		return -ENOMEM;

	mtk_crtc->mmsys_dev = priv->mmsys_dev;
	mtk_crtc->ddp_comp_nr = path_len;
	mtk_crtc->ddp_comp = devm_kcalloc(dev,
					  mtk_crtc->ddp_comp_nr + (conn_routes ? 1 : 0),
					  sizeof(*mtk_crtc->ddp_comp),
					  GFP_KERNEL);
	if (!mtk_crtc->ddp_comp)
		return -ENOMEM;

	mtk_crtc->mutex = mtk_mutex_get(priv->mutex_dev);
	if (IS_ERR(mtk_crtc->mutex)) {
		ret = PTR_ERR(mtk_crtc->mutex);
		dev_err(dev, "Failed to get mutex: %d\n", ret);
		return ret;
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		unsigned int comp_id = path[i];
		struct mtk_ddp_comp *comp;

		comp = &priv->ddp_comp[comp_id];
		mtk_crtc->ddp_comp[i] = comp;

		if (comp->funcs) {
			if (comp->funcs->gamma_set && comp->funcs->gamma_get_lut_size) {
				unsigned int lut_sz = mtk_ddp_gamma_get_lut_size(comp);

				if (lut_sz)
					gamma_lut_size = lut_sz;
			}

			if (comp->funcs->ctm_set)
				has_ctm = true;
		}

		mtk_ddp_comp_register_vblank_cb(comp, mtk_crtc_ddp_irq,
						&mtk_crtc->base);
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		num_comp_planes += mtk_crtc_num_comp_planes(mtk_crtc, i);

	mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
					sizeof(struct drm_plane), GFP_KERNEL);
	if (!mtk_crtc->planes)
		return -ENOMEM;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		ret = mtk_crtc_init_comp_planes(drm_dev, mtk_crtc, i, crtc_i);
		if (ret)
			return ret;
	}

	/*
	 * Default to use the first component as the dma dev.
	 * In the case of ovl_adaptor sub driver, it needs to use the
	 * dma_dev_get function to get representative dma dev.
	 */
	mtk_crtc->dma_dev = mtk_ddp_comp_dma_dev_get(&priv->ddp_comp[path[0]]);

	ret = mtk_crtc_init(drm_dev, mtk_crtc, crtc_i);
	if (ret < 0)
		return ret;

	if (gamma_lut_size)
		drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
	drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
	mutex_init(&mtk_crtc->hw_lock);
	spin_lock_init(&mtk_crtc->config_lock);

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	i = priv->mbox_index++;
	mtk_crtc->cmdq_client.client.dev = mtk_crtc->mmsys_dev;
	mtk_crtc->cmdq_client.client.tx_block = false;
	mtk_crtc->cmdq_client.client.knows_txdone = true;
	mtk_crtc->cmdq_client.client.rx_callback = ddp_cmdq_cb;
	mtk_crtc->cmdq_client.chan =
			mbox_request_channel(&mtk_crtc->cmdq_client.client, i);
	if (IS_ERR(mtk_crtc->cmdq_client.chan)) {
		dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
			drm_crtc_index(&mtk_crtc->base));
		mtk_crtc->cmdq_client.chan = NULL;
	}

	if (mtk_crtc->cmdq_client.chan) {
		ret = of_property_read_u32_index(priv->mutex_node,
						 "mediatek,gce-events",
						 i,
						 &mtk_crtc->cmdq_event);
		if (ret) {
			dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
				drm_crtc_index(&mtk_crtc->base));
			mbox_free_channel(mtk_crtc->cmdq_client.chan);
			mtk_crtc->cmdq_client.chan = NULL;
		} else {
			ret = cmdq_pkt_create(&mtk_crtc->cmdq_client,
					      &mtk_crtc->cmdq_handle,
					      PAGE_SIZE);
			if (ret) {
				dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
					drm_crtc_index(&mtk_crtc->base));
				mbox_free_channel(mtk_crtc->cmdq_client.chan);
				mtk_crtc->cmdq_client.chan = NULL;
			}
		}

		/* for sending blocking cmd in crtc disable */
		init_waitqueue_head(&mtk_crtc->cb_blocking_queue);
	}
#endif

	if (conn_routes) {
		for (i = 0; i < num_conn_routes; i++) {
			unsigned int comp_id = conn_routes[i].route_ddp;
			struct device_node *node = priv->comp_node[comp_id];
			struct mtk_ddp_comp *comp = &priv->ddp_comp[comp_id];

			if (!comp->dev) {
				dev_dbg(dev, "comp_id:%d, Component %pOF not initialized\n",
					comp_id, node);
				/* mark encoder_index to -1, if route comp device is not enabled */
				comp->encoder_index = -1;
				continue;
			}

			mtk_ddp_comp_encoder_index_set(&priv->ddp_comp[comp_id]);
		}

		mtk_crtc->num_conn_routes = num_conn_routes;
		mtk_crtc->conn_routes = conn_routes;

		/* increase ddp_comp_nr at the end of mtk_crtc_create */
		mtk_crtc->ddp_comp_nr++;
	}

	return 0;
}