xref: /linux/drivers/gpu/drm/mediatek/mtk_crtc.c (revision e811c33b1f137be26a20444b79db8cbc1fca1c89)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>

#include <asm/barrier.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "mtk_crtc.h"
#include "mtk_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_gem.h"
#include "mtk_plane.h"

/**
 * struct mtk_crtc - MediaTek specific crtc structure.
 * @base: crtc object.
 * @enabled: records whether crtc_enable succeeded
 * @pending_needs_vblank: whether a vblank event must be sent once the
 *                        pending configuration has been applied
 * @event: pending vblank event, sent when the update reaches the hardware
 * @planes: array of drm_plane structures, one for each hardware overlay
 *          layer provided by the components in this crtc's path
 * @layer_nr: number of planes in @planes
 * @pending_planes: whether any plane has pending changes to be applied
 * @pending_async_planes: whether any plane has pending asynchronous
 *                        (cursor) changes to be applied
 * @cmdq_client: mailbox client used to flush register updates through GCE
 * @cmdq_handle: command packet reused for every CMDQ flush
 * @cmdq_event: GCE event the command packet waits on before updating registers
 * @cmdq_vblank_cnt: vblank countdown used to detect CMDQ execution timeouts
 * @cb_blocking_queue: wait queue woken when the CMDQ callback completes
 * @mmsys_dev: pointer to the mmsys device for configuration registers
 * @dma_dev: device used for DMA mapping of GEM buffers
 * @mutex: handle to one of the ten disp_mutex streams
 * @ddp_comp_nr: number of components in ddp_comp
 * @ddp_comp: array of pointers the mtk_ddp_comp structures used by this crtc
 * @num_conn_routes: number of connector routes in @conn_routes
 * @conn_routes: optional routes to alternative output components
 * @hw_lock: lock for display hardware access
 * @config_updating: whether a configuration update is in progress
 * @config_lock: lock protecting @config_updating against the CMDQ callback
 */
struct mtk_crtc {
	struct drm_crtc			base;
	bool				enabled;

	bool				pending_needs_vblank;
	struct drm_pending_vblank_event	*event;

	struct drm_plane		*planes;
	unsigned int			layer_nr;
	bool				pending_planes;
	bool				pending_async_planes;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_client		cmdq_client;
	struct cmdq_pkt			cmdq_handle;
	u32				cmdq_event;
	u32				cmdq_vblank_cnt;
	wait_queue_head_t		cb_blocking_queue;
#endif

	struct device			*mmsys_dev;
	struct device			*dma_dev;
	struct mtk_mutex		*mutex;
	unsigned int			ddp_comp_nr;
	struct mtk_ddp_comp		**ddp_comp;
	unsigned int			num_conn_routes;
	const struct mtk_drm_route	*conn_routes;

	/* lock for display hardware access */
	struct mutex			hw_lock;
	bool				config_updating;
	/* lock for config_updating to cmd buffer */
	spinlock_t			config_lock;
};

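/*
 * struct mtk_crtc_state - MediaTek specific crtc state.
 *
 * The pending_* fields mirror the mode parameters set in
 * mtk_crtc_mode_set_nofb() until the vblank handler (or the CMDQ packet)
 * writes them into the hardware.
 */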
struct mtk_crtc_state {
	struct drm_crtc_state		base;

	bool				pending_config;
	unsigned int			pending_width;
	unsigned int			pending_height;
	unsigned int			pending_vrefresh;
};

static inline struct mtk_crtc *to_mtk_crtc(struct drm_crtc *c)
{
	return container_of(c, struct mtk_crtc, base);
}

static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s)
{
	return container_of(s, struct mtk_crtc_state, base);
}

static void mtk_crtc_finish_page_flip(struct mtk_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	unsigned long flags;

	if (mtk_crtc->event) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
		drm_crtc_vblank_put(crtc);
		mtk_crtc->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}
}

static void mtk_drm_finish_page_flip(struct mtk_crtc *mtk_crtc)
{
	unsigned long flags;

	drm_crtc_handle_vblank(&mtk_crtc->base);

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
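	/* With CMDQ, the page flip is completed from ddp_cmdq_cb() instead */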
	if (mtk_crtc->cmdq_client.chan)
		return;
#endif

	spin_lock_irqsave(&mtk_crtc->config_lock, flags);
	if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
		mtk_crtc_finish_page_flip(mtk_crtc);
		mtk_crtc->pending_needs_vblank = false;
	}
	spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
}

static void mtk_crtc_destroy(struct drm_crtc *crtc)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	int i;

	mtk_mutex_put(mtk_crtc->mutex);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (mtk_crtc->cmdq_client.chan) {
		cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle);
		mbox_free_channel(mtk_crtc->cmdq_client.chan);
		mtk_crtc->cmdq_client.chan = NULL;
	}
#endif

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		struct mtk_ddp_comp *comp;

		comp = mtk_crtc->ddp_comp[i];
		mtk_ddp_comp_unregister_vblank_cb(comp);
	}

	drm_crtc_cleanup(crtc);
}

static void mtk_crtc_reset(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state;

	if (crtc->state)
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

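	/* to_mtk_crtc_state(NULL) is still NULL (base is the first member), so kfree() is safe */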
	kfree(to_mtk_crtc_state(crtc->state));
	crtc->state = NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

static struct drm_crtc_state *mtk_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	WARN_ON(state->base.crtc != crtc);
	state->base.crtc = crtc;
	state->pending_config = false;

	return &state->base;
}

static void mtk_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(to_mtk_crtc_state(state));
}

static enum drm_mode_status
mtk_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	enum drm_mode_status status = MODE_OK;
	int i;

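	/* A mode is valid only if every component in the path accepts it */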
	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		status = mtk_ddp_comp_mode_valid(mtk_crtc->ddp_comp[i], mode);
		if (status != MODE_OK)
			break;
	}
	return status;
}

static bool mtk_crtc_mode_fixup(struct drm_crtc *crtc,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	/* Nothing to do here, but this callback is mandatory. */
	return true;
}

static void mtk_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);

	state->pending_width = crtc->mode.hdisplay;
	state->pending_height = crtc->mode.vdisplay;
	state->pending_vrefresh = drm_mode_vrefresh(&crtc->mode);
	wmb();	/* Make sure the above parameters are set before update */
	state->pending_config = true;
}

static int mtk_crtc_ddp_clk_enable(struct mtk_crtc *mtk_crtc)
{
	int ret;
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		ret = mtk_ddp_comp_clk_enable(mtk_crtc->ddp_comp[i]);
		if (ret) {
			DRM_ERROR("Failed to enable clock %d: %d\n", i, ret);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= 0)
		mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
	return ret;
}

static void mtk_crtc_ddp_clk_disable(struct mtk_crtc *mtk_crtc)
{
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
}

static
struct mtk_ddp_comp *mtk_ddp_comp_for_plane(struct drm_crtc *crtc,
					    struct drm_plane *plane,
					    unsigned int *local_layer)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp;
	int i, count = 0;
	unsigned int local_index = plane - mtk_crtc->planes;

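	/* Map the crtc-wide plane index onto a layer local to one component */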
	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		comp = mtk_crtc->ddp_comp[i];
		if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) {
			*local_layer = local_index - count;
			return comp;
		}
		count += mtk_ddp_comp_layer_nr(comp);
	}

	WARN(1, "Failed to find component for plane %d\n", plane->index);
	return NULL;
}

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
{
	struct cmdq_cb_data *data = mssg;
	struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client);
	struct mtk_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_crtc, cmdq_client);
	struct mtk_crtc_state *state;
	unsigned int i;
	unsigned long flags;

	/* release GCE HW usage and start autosuspend */
	pm_runtime_mark_last_busy(cmdq_cl->chan->mbox->dev);
	pm_runtime_put_autosuspend(cmdq_cl->chan->mbox->dev);

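	/* The packet failed or was aborted; leave all pending state untouched */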
	if (data->sta < 0)
		return;

	state = to_mtk_crtc_state(mtk_crtc->base.state);

	spin_lock_irqsave(&mtk_crtc->config_lock, flags);
	if (mtk_crtc->config_updating)
		goto ddp_cmdq_cb_out;

	state->pending_config = false;

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			plane_state->pending.config = false;
		}
		mtk_crtc->pending_planes = false;
	}

	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			plane_state->pending.async_config = false;
		}
		mtk_crtc->pending_async_planes = false;
	}

ddp_cmdq_cb_out:

	if (mtk_crtc->pending_needs_vblank) {
		mtk_crtc_finish_page_flip(mtk_crtc);
		mtk_crtc->pending_needs_vblank = false;
	}

	spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);

	mtk_crtc->cmdq_vblank_cnt = 0;
	wake_up(&mtk_crtc->cb_blocking_queue);
}
#endif

static int mtk_crtc_ddp_hw_init(struct mtk_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_connector_list_iter conn_iter;
	unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC;
	int ret;
	int i;

	if (WARN_ON(!crtc->state))
		return -EINVAL;

	width = crtc->state->adjusted_mode.hdisplay;
	height = crtc->state->adjusted_mode.vdisplay;
	vrefresh = drm_mode_vrefresh(&crtc->state->adjusted_mode);

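	/* Use the lowest nonzero bpc reported by any connector on this crtc, capped at MTK_MAX_BPC */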
	drm_for_each_encoder(encoder, crtc->dev) {
		if (encoder->crtc != crtc)
			continue;

		drm_connector_list_iter_begin(crtc->dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (connector->encoder != encoder)
				continue;
			if (connector->display_info.bpc != 0 &&
			    bpc > connector->display_info.bpc)
				bpc = connector->display_info.bpc;
		}
		drm_connector_list_iter_end(&conn_iter);
	}

	ret = pm_runtime_resume_and_get(crtc->dev->dev);
	if (ret < 0) {
		DRM_ERROR("Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = mtk_mutex_prepare(mtk_crtc->mutex);
	if (ret < 0) {
		DRM_ERROR("Failed to enable mutex clock: %d\n", ret);
		goto err_pm_runtime_put;
	}

	ret = mtk_crtc_ddp_clk_enable(mtk_crtc);
	if (ret < 0) {
		DRM_ERROR("Failed to enable component clocks: %d\n", ret);
		goto err_mutex_unprepare;
	}

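	/*
	 * Connect each component's output to the next component's input and
	 * add every component to this crtc's disp_mutex stream.
	 */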
	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		if (!mtk_ddp_comp_connect(mtk_crtc->ddp_comp[i], mtk_crtc->mmsys_dev,
					  mtk_crtc->ddp_comp[i + 1]->id))
			mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
					      mtk_crtc->ddp_comp[i]->id,
					      mtk_crtc->ddp_comp[i + 1]->id);
		if (!mtk_ddp_comp_add(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
			mtk_mutex_add_comp(mtk_crtc->mutex,
					   mtk_crtc->ddp_comp[i]->id);
	}
	if (!mtk_ddp_comp_add(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
		mtk_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_enable(mtk_crtc->mutex);

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];

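		/*
		 * In a cascaded path the second component blends the first
		 * one's output in via its background color input.
		 */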
407 			mtk_ddp_comp_bgclr_in_on(comp);
408 
409 		mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL);
410 		mtk_ddp_comp_start(comp);
411 	}
412 
	/* Initially configure all planes */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;
		struct mtk_ddp_comp *comp;
		unsigned int local_layer;

		plane_state = to_mtk_plane_state(plane->state);

		/* Layers must not be enabled before the crtc itself is enabled */
		plane_state->pending.enable = false;
		comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer);
		if (comp)
			mtk_ddp_comp_layer_config(comp, local_layer,
						  plane_state, NULL);
	}

	return 0;

err_mutex_unprepare:
	mtk_mutex_unprepare(mtk_crtc->mutex);
err_pm_runtime_put:
	pm_runtime_put(crtc->dev->dev);
	return ret;
}

static void mtk_crtc_ddp_hw_fini(struct mtk_crtc *mtk_crtc)
{
	struct drm_device *drm = mtk_crtc->base.dev;
	struct drm_crtc *crtc = &mtk_crtc->base;
	unsigned long flags;
	int i;

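	/*
	 * Tear down in the reverse order of mtk_crtc_ddp_hw_init(): stop the
	 * components, detach them from the disp_mutex stream, then break the
	 * component-to-component connections.
	 */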
	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
		if (i == 1)
			mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]);
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
			mtk_mutex_remove_comp(mtk_crtc->mutex,
					      mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_disable(mtk_crtc->mutex);
	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		if (!mtk_ddp_comp_disconnect(mtk_crtc->ddp_comp[i], mtk_crtc->mmsys_dev,
					     mtk_crtc->ddp_comp[i + 1]->id))
			mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev,
						 mtk_crtc->ddp_comp[i]->id,
						 mtk_crtc->ddp_comp[i + 1]->id);
		if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
			mtk_mutex_remove_comp(mtk_crtc->mutex,
					      mtk_crtc->ddp_comp[i]->id);
	}
	if (!mtk_ddp_comp_remove(mtk_crtc->ddp_comp[i], mtk_crtc->mutex))
		mtk_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_crtc_ddp_clk_disable(mtk_crtc);
	mtk_mutex_unprepare(mtk_crtc->mutex);

	pm_runtime_put(drm->dev);

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}
}

static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
				struct cmdq_pkt *cmdq_handle)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	unsigned int i;
	unsigned int local_layer;

	/*
	 * TODO: instead of updating the registers here, we should prepare
	 * working registers in atomic_commit and let the hardware command
	 * queue update module registers on vblank.
	 */
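	/*
	 * When flushing through CMDQ (cmdq_handle != NULL), the pending flags
	 * are left set here and cleared by ddp_cmdq_cb() once the packet has
	 * executed.
	 */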
	if (state->pending_config) {
		mtk_ddp_comp_config(comp, state->pending_width,
				    state->pending_height,
				    state->pending_vrefresh, 0,
				    cmdq_handle);

		if (!cmdq_handle)
			state->pending_config = false;
	}

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.config)
				continue;

			comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			if (!cmdq_handle)
				plane_state->pending.config = false;
		}

		if (!cmdq_handle)
			mtk_crtc->pending_planes = false;
	}

	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.async_config)
				continue;

			comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			if (!cmdq_handle)
				plane_state->pending.async_config = false;
		}

		if (!cmdq_handle)
			mtk_crtc->pending_async_planes = false;
	}
}

static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
#endif
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	unsigned int pending_planes = 0, pending_async_planes = 0;
	int i;
	unsigned long flags;

	mutex_lock(&mtk_crtc->hw_lock);

	spin_lock_irqsave(&mtk_crtc->config_lock, flags);
	mtk_crtc->config_updating = true;
	spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);

	if (needs_vblank)
		mtk_crtc->pending_needs_vblank = true;

	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		if (plane_state->pending.dirty) {
			plane_state->pending.config = true;
			plane_state->pending.dirty = false;
			pending_planes |= BIT(i);
		} else if (plane_state->pending.async_dirty) {
			plane_state->pending.async_config = true;
			plane_state->pending.async_dirty = false;
			pending_async_planes |= BIT(i);
		}
	}
	if (pending_planes)
		mtk_crtc->pending_planes = true;
	if (pending_async_planes)
		mtk_crtc->pending_async_planes = true;

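	/* With shadow registers, apply the config from the CPU while holding the disp_mutex */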
	if (priv->data->shadow_register) {
		mtk_mutex_acquire(mtk_crtc->mutex);
		mtk_crtc_ddp_config(crtc, NULL);
		mtk_mutex_release(mtk_crtc->mutex);
	}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (mtk_crtc->cmdq_client.chan) {
		mbox_flush(mtk_crtc->cmdq_client.chan, 2000);
		cmdq_handle->cmd_buf_size = 0;
		cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
		mtk_crtc_ddp_config(crtc, cmdq_handle);
		cmdq_pkt_eoc(cmdq_handle);
		dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev,
					   cmdq_handle->pa_base,
					   cmdq_handle->cmd_buf_size,
					   DMA_TO_DEVICE);
		/*
		 * The CMDQ packet is expected to execute within the next
		 * three vblanks: occasionally one vblank interrupt fires
		 * before the message is sent, and one more fires after the
		 * packet completes, so declare a timeout once three vblank
		 * interrupts have elapsed without completion.
		 */
		mtk_crtc->cmdq_vblank_cnt = 3;

		spin_lock_irqsave(&mtk_crtc->config_lock, flags);
		mtk_crtc->config_updating = false;
		spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);

		if (pm_runtime_resume_and_get(mtk_crtc->cmdq_client.chan->mbox->dev) < 0)
			goto update_config_out;

		mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
		mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
		goto update_config_out;
	}
#endif
	spin_lock_irqsave(&mtk_crtc->config_lock, flags);
	mtk_crtc->config_updating = false;
	spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
update_config_out:
#endif
	mutex_unlock(&mtk_crtc->hw_lock);
}

static void mtk_crtc_ddp_irq(void *data)
{
	struct drm_crtc *crtc = data;
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_private *priv = crtc->dev->dev_private;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan)
		mtk_crtc_ddp_config(crtc, NULL);
	else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
		DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
			  drm_crtc_index(&mtk_crtc->base));
#else
	if (!priv->data->shadow_register)
		mtk_crtc_ddp_config(crtc, NULL);
#endif
	mtk_drm_finish_page_flip(mtk_crtc);
}

static int mtk_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];

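	/* Vblank interrupts are enabled on the first component of the path */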
	mtk_ddp_comp_enable_vblank(comp);

	return 0;
}

static void mtk_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];

	mtk_ddp_comp_disable_vblank(comp);
}

static void mtk_crtc_update_output(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	int crtc_index = drm_crtc_index(crtc);
	int i;
	struct device *dev;
	struct drm_crtc_state *crtc_state = state->crtcs[crtc_index].new_state;
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_private *priv;
	unsigned int encoder_mask = crtc_state->encoder_mask;

	if (!crtc_state->connectors_changed)
		return;

	if (!mtk_crtc->num_conn_routes)
		return;

	priv = ((struct mtk_drm_private *)crtc->dev->dev_private)->all_drm_private[crtc_index];
	dev = priv->dev;

	dev_dbg(dev, "connector change:%d, encoder mask:0x%x for crtc:%d\n",
		crtc_state->connectors_changed, encoder_mask, crtc_index);

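	/*
	 * Find the route whose component drives an encoder enabled in this
	 * state and install it as the last component of the path.
	 */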
	for (i = 0; i < mtk_crtc->num_conn_routes; i++) {
		unsigned int comp_id = mtk_crtc->conn_routes[i].route_ddp;
		struct mtk_ddp_comp *comp = &priv->ddp_comp[comp_id];

		if (comp->encoder_index >= 0 &&
		    (encoder_mask & BIT(comp->encoder_index))) {
			mtk_crtc->ddp_comp[mtk_crtc->ddp_comp_nr - 1] = comp;
			dev_dbg(dev, "Add comp_id: %d at path index %d\n",
				comp->id, mtk_crtc->ddp_comp_nr - 1);
			break;
		}
	}
}

int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
			 struct mtk_plane_state *state)
{
	unsigned int local_layer;
	struct mtk_ddp_comp *comp;

	comp = mtk_ddp_comp_for_plane(crtc, plane, &local_layer);
	if (comp)
		return mtk_ddp_comp_layer_check(comp, local_layer, state);
	return 0;
}

void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_plane_state *plane_state = to_mtk_plane_state(plane->state);
	int i;

	/* When the CPU writes the registers directly, there is nothing to wait for */
	if (!mtk_crtc->cmdq_client.chan)
		return;

	if (!mtk_crtc->enabled)
		return;

	/* set pending plane state to disabled */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *mtk_plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(mtk_plane->state);

		if (mtk_plane->index == plane->index) {
			memcpy(mtk_plane_state, plane_state, sizeof(*plane_state));
			break;
		}
	}
	mtk_crtc_update_config(mtk_crtc, false);

	/* wait for planes to be disabled by CMDQ */
	wait_event_timeout(mtk_crtc->cb_blocking_queue,
			   mtk_crtc->cmdq_vblank_cnt == 0,
			   msecs_to_jiffies(500));
#endif
}

void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
			   struct drm_atomic_state *state)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);

	if (!mtk_crtc->enabled)
		return;

	mtk_crtc_update_config(mtk_crtc, false);
}

static void mtk_crtc_atomic_enable(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int ret;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);

	ret = mtk_ddp_comp_power_on(comp);
	if (ret < 0) {
		DRM_DEV_ERROR(comp->dev, "Failed to enable power domain: %d\n", ret);
		return;
	}

	mtk_crtc_update_output(crtc, state);

	ret = mtk_crtc_ddp_hw_init(mtk_crtc);
	if (ret) {
		mtk_ddp_comp_power_off(comp);
		return;
	}

	drm_crtc_vblank_on(crtc);
	mtk_crtc->enabled = true;
}

static void mtk_crtc_atomic_disable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int i;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
	if (!mtk_crtc->enabled)
		return;

	/* Set all pending plane state to disabled */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		plane_state->pending.enable = false;
		plane_state->pending.config = true;
	}
	mtk_crtc->pending_planes = true;

	mtk_crtc_update_config(mtk_crtc, false);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	/* Wait for planes to be disabled by cmdq */
	if (mtk_crtc->cmdq_client.chan)
		wait_event_timeout(mtk_crtc->cb_blocking_queue,
				   mtk_crtc->cmdq_vblank_cnt == 0,
				   msecs_to_jiffies(500));
#endif
	/* Wait for planes to be disabled */
	drm_crtc_wait_one_vblank(crtc);

	drm_crtc_vblank_off(crtc);
	mtk_crtc_ddp_hw_fini(mtk_crtc);
	mtk_ddp_comp_power_off(comp);

	mtk_crtc->enabled = false;
}

static void mtk_crtc_atomic_begin(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	unsigned long flags;

	if (mtk_crtc->event && mtk_crtc_state->base.event)
		DRM_ERROR("new event while there is still a pending event\n");

	if (mtk_crtc_state->base.event) {
		mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc);
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		mtk_crtc->event = mtk_crtc_state->base.event;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

		mtk_crtc_state->base.event = NULL;
	}
}

static void mtk_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
	int i;

	if (crtc->state->color_mgmt_changed)
		for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
			mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
			mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state);
		}
	mtk_crtc_update_config(mtk_crtc, !!mtk_crtc->event);
}

static const struct drm_crtc_funcs mtk_crtc_funcs = {
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.destroy		= mtk_crtc_destroy,
	.reset			= mtk_crtc_reset,
	.atomic_duplicate_state	= mtk_crtc_duplicate_state,
	.atomic_destroy_state	= mtk_crtc_destroy_state,
	.enable_vblank		= mtk_crtc_enable_vblank,
	.disable_vblank		= mtk_crtc_disable_vblank,
};

static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
	.mode_fixup	= mtk_crtc_mode_fixup,
	.mode_set_nofb	= mtk_crtc_mode_set_nofb,
	.mode_valid	= mtk_crtc_mode_valid,
	.atomic_begin	= mtk_crtc_atomic_begin,
	.atomic_flush	= mtk_crtc_atomic_flush,
	.atomic_enable	= mtk_crtc_atomic_enable,
	.atomic_disable	= mtk_crtc_atomic_disable,
};

static int mtk_crtc_init(struct drm_device *drm, struct mtk_crtc *mtk_crtc,
			 unsigned int pipe)
{
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
			primary = &mtk_crtc->planes[i];
		else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
			cursor = &mtk_crtc->planes[i];
	}

	ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
					&mtk_crtc_funcs, NULL);
	if (ret)
		goto err_cleanup_crtc;

	drm_crtc_helper_add(&mtk_crtc->base, &mtk_crtc_helper_funcs);

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(&mtk_crtc->base);
	return ret;
}

static int mtk_crtc_num_comp_planes(struct mtk_crtc *mtk_crtc, int comp_idx)
{
	struct mtk_ddp_comp *comp;

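	/* Only the first two components in the path may contribute planes */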
	if (comp_idx > 1)
		return 0;

	comp = mtk_crtc->ddp_comp[comp_idx];
	if (!comp->funcs)
		return 0;

	if (comp_idx == 1 && !comp->funcs->bgclr_in_on)
		return 0;

	return mtk_ddp_comp_layer_nr(comp);
}

static inline
enum drm_plane_type mtk_crtc_plane_type(unsigned int plane_idx,
					unsigned int num_planes)
{
	if (plane_idx == 0)
		return DRM_PLANE_TYPE_PRIMARY;
	else if (plane_idx == (num_planes - 1))
		return DRM_PLANE_TYPE_CURSOR;
	else
		return DRM_PLANE_TYPE_OVERLAY;
}

static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev,
				     struct mtk_crtc *mtk_crtc,
				     int comp_idx, int pipe)
{
	int num_planes = mtk_crtc_num_comp_planes(mtk_crtc, comp_idx);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx];
	int i, ret;

	for (i = 0; i < num_planes; i++) {
		ret = mtk_plane_init(drm_dev,
				&mtk_crtc->planes[mtk_crtc->layer_nr],
				BIT(pipe),
				mtk_crtc_plane_type(mtk_crtc->layer_nr, num_planes),
				mtk_ddp_comp_supported_rotations(comp),
				mtk_ddp_comp_get_blend_modes(comp),
				mtk_ddp_comp_get_formats(comp),
				mtk_ddp_comp_get_num_formats(comp),
				mtk_ddp_comp_is_afbc_supported(comp), i);
		if (ret)
			return ret;

		mtk_crtc->layer_nr++;
	}
	return 0;
}

struct device *mtk_crtc_dma_dev_get(struct drm_crtc *crtc)
{
	struct mtk_crtc *mtk_crtc = NULL;

	if (!crtc)
		return NULL;

	mtk_crtc = to_mtk_crtc(crtc);
	if (!mtk_crtc)
		return NULL;

	return mtk_crtc->dma_dev;
}

int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path,
		    unsigned int path_len, int priv_data_index,
		    const struct mtk_drm_route *conn_routes,
		    unsigned int num_conn_routes)
{
	struct mtk_drm_private *priv = drm_dev->dev_private;
	struct device *dev = drm_dev->dev;
	struct mtk_crtc *mtk_crtc;
	unsigned int num_comp_planes = 0;
	int ret;
	int i;
	bool has_ctm = false;
	uint gamma_lut_size = 0;
	struct drm_crtc *tmp;
	int crtc_i = 0;

	if (!path)
		return 0;

	priv = priv->all_drm_private[priv_data_index];

	drm_for_each_crtc(tmp, drm_dev)
		crtc_i++;

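	/* Verify that every component in the path has been probed before creating the crtc */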
	for (i = 0; i < path_len; i++) {
		enum mtk_ddp_comp_id comp_id = path[i];
		struct device_node *node;
		struct mtk_ddp_comp *comp;

		node = priv->comp_node[comp_id];
		comp = &priv->ddp_comp[comp_id];

		/*
		 * Not all drm components have a DTS device node, e.g.
		 * ovl_adaptor, which is a sub driver brought up by the drm
		 * driver itself.
		 */
		if (!node && comp_id != DDP_COMPONENT_DRM_OVL_ADAPTOR) {
			dev_info(dev,
				"Not creating crtc %d because component %d is disabled or missing\n",
				crtc_i, comp_id);
			return 0;
		}

		if (!comp->dev) {
			dev_err(dev, "Component %pOF not initialized\n", node);
			return -ENODEV;
		}
	}

	mtk_crtc = devm_kzalloc(dev, sizeof(*mtk_crtc), GFP_KERNEL);
	if (!mtk_crtc)
		return -ENOMEM;

	mtk_crtc->mmsys_dev = priv->mmsys_dev;
	mtk_crtc->ddp_comp_nr = path_len;
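	/* Reserve one extra slot: a connector route component may be appended to the path */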
	mtk_crtc->ddp_comp = devm_kcalloc(dev,
					  mtk_crtc->ddp_comp_nr + (conn_routes ? 1 : 0),
					  sizeof(*mtk_crtc->ddp_comp),
					  GFP_KERNEL);
	if (!mtk_crtc->ddp_comp)
		return -ENOMEM;

	mtk_crtc->mutex = mtk_mutex_get(priv->mutex_dev);
	if (IS_ERR(mtk_crtc->mutex)) {
		ret = PTR_ERR(mtk_crtc->mutex);
		dev_err(dev, "Failed to get mutex: %d\n", ret);
		return ret;
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		unsigned int comp_id = path[i];
		struct mtk_ddp_comp *comp;

		comp = &priv->ddp_comp[comp_id];
		mtk_crtc->ddp_comp[i] = comp;

		if (comp->funcs) {
			if (comp->funcs->gamma_set && comp->funcs->gamma_get_lut_size) {
				unsigned int lut_sz = mtk_ddp_gamma_get_lut_size(comp);

				if (lut_sz)
					gamma_lut_size = lut_sz;
			}

			if (comp->funcs->ctm_set)
				has_ctm = true;
		}

		mtk_ddp_comp_register_vblank_cb(comp, mtk_crtc_ddp_irq,
						&mtk_crtc->base);
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		num_comp_planes += mtk_crtc_num_comp_planes(mtk_crtc, i);

	mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
					sizeof(struct drm_plane), GFP_KERNEL);
	if (!mtk_crtc->planes)
		return -ENOMEM;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		ret = mtk_crtc_init_comp_planes(drm_dev, mtk_crtc, i, crtc_i);
		if (ret)
			return ret;
	}

	/*
	 * Default to using the first component as the dma dev. In the case
	 * of the ovl_adaptor sub driver, the dma_dev_get callback is needed
	 * to get a representative dma dev.
	 */
	mtk_crtc->dma_dev = mtk_ddp_comp_dma_dev_get(&priv->ddp_comp[path[0]]);

	ret = mtk_crtc_init(drm_dev, mtk_crtc, crtc_i);
	if (ret < 0)
		return ret;

	if (gamma_lut_size)
		drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
	drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
	mutex_init(&mtk_crtc->hw_lock);
	spin_lock_init(&mtk_crtc->config_lock);

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
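	/* Request a dedicated GCE mailbox channel; on failure, fall back to CPU register writes */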
	i = priv->mbox_index++;
	mtk_crtc->cmdq_client.client.dev = mtk_crtc->mmsys_dev;
	mtk_crtc->cmdq_client.client.tx_block = false;
	mtk_crtc->cmdq_client.client.knows_txdone = true;
	mtk_crtc->cmdq_client.client.rx_callback = ddp_cmdq_cb;
	mtk_crtc->cmdq_client.chan =
			mbox_request_channel(&mtk_crtc->cmdq_client.client, i);
	if (IS_ERR(mtk_crtc->cmdq_client.chan)) {
		dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
			drm_crtc_index(&mtk_crtc->base));
		mtk_crtc->cmdq_client.chan = NULL;
	}

	if (mtk_crtc->cmdq_client.chan) {
		ret = of_property_read_u32_index(priv->mutex_node,
						 "mediatek,gce-events",
						 i,
						 &mtk_crtc->cmdq_event);
		if (ret) {
			dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
				drm_crtc_index(&mtk_crtc->base));
			mbox_free_channel(mtk_crtc->cmdq_client.chan);
			mtk_crtc->cmdq_client.chan = NULL;
		} else {
			ret = cmdq_pkt_create(&mtk_crtc->cmdq_client,
					      &mtk_crtc->cmdq_handle,
					      PAGE_SIZE);
			if (ret) {
				dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
					drm_crtc_index(&mtk_crtc->base));
				mbox_free_channel(mtk_crtc->cmdq_client.chan);
				mtk_crtc->cmdq_client.chan = NULL;
			}
		}

		/* for sending blocking cmd in crtc disable */
		init_waitqueue_head(&mtk_crtc->cb_blocking_queue);
	}
#endif

	if (conn_routes) {
		for (i = 0; i < num_conn_routes; i++) {
			unsigned int comp_id = conn_routes[i].route_ddp;
			struct device_node *node = priv->comp_node[comp_id];
			struct mtk_ddp_comp *comp = &priv->ddp_comp[comp_id];

			if (!comp->dev) {
				dev_dbg(dev, "comp_id:%d, Component %pOF not initialized\n",
					comp_id, node);
				/* Mark encoder_index as -1 if the route component's device is not enabled */
				comp->encoder_index = -1;
				continue;
			}

			mtk_ddp_comp_encoder_index_set(&priv->ddp_comp[comp_id]);
		}

		mtk_crtc->num_conn_routes = num_conn_routes;
		mtk_crtc->conn_routes = conn_routes;

		/* Increase ddp_comp_nr only here, at the end of mtk_crtc_create() */
		mtk_crtc->ddp_comp_nr++;
	}

	return 0;
}