// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>

#include <drm/drm_managed.h>

#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"

#define   CTL_LAYER(lm)                 \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT(lm)             \
	(0x40 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT2(lm)             \
	(0x70 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT3(lm)             \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT4(lm)             \
	(0xB8 + (((lm) - LM_0) * 0x004))
#define   CTL_TOP                       0x014
#define   CTL_FLUSH                     0x018
#define   CTL_START                     0x01C
#define   CTL_PREPARE                   0x0d0
#define   CTL_SW_RESET                  0x030
#define   CTL_LAYER_EXTN_OFFSET         0x40
#define   CTL_MERGE_3D_ACTIVE           0x0E4
#define   CTL_DSC_ACTIVE                0x0E8
#define   CTL_WB_ACTIVE                 0x0EC
#define   CTL_CWB_ACTIVE                0x0F0
#define   CTL_INTF_ACTIVE               0x0F4
#define   CTL_CDM_ACTIVE                0x0F8
#define   CTL_FETCH_PIPE_ACTIVE         0x0FC
#define   CTL_MERGE_3D_FLUSH            0x100
#define   CTL_DSC_FLUSH                 0x104
#define   CTL_WB_FLUSH                  0x108
#define   CTL_CWB_FLUSH                 0x10C
#define   CTL_INTF_FLUSH                0x110
#define   CTL_CDM_FLUSH                 0x114
#define   CTL_PERIPH_FLUSH              0x128
#define   CTL_INTF_MASTER               0x134
#define   CTL_DSPP_n_FLUSH(n)           ((0x13C) + ((n) * 4))

#define CTL_MIXER_BORDER_OUT            BIT(24)
#define CTL_FLUSH_MASK_CTL              BIT(17)

#define DPU_REG_RESET_TIMEOUT_US        2000
#define  MERGE_3D_IDX   23
#define  DSC_IDX        22
#define CDM_IDX         26
#define  PERIPH_IDX     30
#define  INTF_IDX       31
#define WB_IDX          16
#define CWB_IDX         28
#define  DSPP_IDX       29  /* From DPU hw rev 7.x.x */
#define CTL_INVALID_BIT                 0xffff
#define CTL_DEFAULT_GROUP_ID		0xf

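/*
 * Map of SSPP index to its bit position in CTL_FETCH_PIPE_ACTIVE; entries
 * set to CTL_INVALID_BIT have no fetch-active bit in that register.
 */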
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
	1, 2, 3, 4, 5};

static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
		enum dpu_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}

static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
{
	return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
}

static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
	ctx->pending_intf_flush_mask = 0;
	ctx->pending_wb_flush_mask = 0;
	ctx->pending_cwb_flush_mask = 0;
	ctx->pending_merge_3d_flush_mask = 0;
	ctx->pending_dsc_flush_mask = 0;
	ctx->pending_cdm_flush_mask = 0;

	memset(ctx->pending_dspp_flush_mask, 0,
		sizeof(ctx->pending_dspp_flush_mask));
}

static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}

static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	return ctx->pending_flush_mask;
}

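/*
 * Active-CTL flush: program the per-block flush registers (merge_3d, intf,
 * wb, cwb, dspp, periph, dsc and cdm) from their pending masks first, then
 * write CTL_FLUSH to trigger the flush of the whole CTL path.
 */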
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
	int dspp;

	if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
				ctx->pending_merge_3d_flush_mask);
	if (ctx->pending_flush_mask & BIT(INTF_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
				ctx->pending_intf_flush_mask);
	if (ctx->pending_flush_mask & BIT(WB_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
				ctx->pending_wb_flush_mask);
	if (ctx->pending_flush_mask & BIT(CWB_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_CWB_FLUSH,
				ctx->pending_cwb_flush_mask);

	if (ctx->pending_flush_mask & BIT(DSPP_IDX))
		for (dspp = DSPP_0; dspp < DSPP_MAX; dspp++) {
			if (ctx->pending_dspp_flush_mask[dspp - DSPP_0])
				DPU_REG_WRITE(&ctx->hw,
				CTL_DSPP_n_FLUSH(dspp - DSPP_0),
				ctx->pending_dspp_flush_mask[dspp - DSPP_0]);
		}

	if (ctx->pending_flush_mask & BIT(PERIPH_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_PERIPH_FLUSH,
			      ctx->pending_periph_flush_mask);

	if (ctx->pending_flush_mask & BIT(DSC_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,
			      ctx->pending_dsc_flush_mask);

	if (ctx->pending_flush_mask & BIT(CDM_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_CDM_FLUSH,
			      ctx->pending_cdm_flush_mask);

	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
	enum dpu_sspp sspp)
{
	switch (sspp) {
	case SSPP_VIG0:
		ctx->pending_flush_mask |= BIT(0);
		break;
	case SSPP_VIG1:
		ctx->pending_flush_mask |= BIT(1);
		break;
	case SSPP_VIG2:
		ctx->pending_flush_mask |= BIT(2);
		break;
	case SSPP_VIG3:
		ctx->pending_flush_mask |= BIT(18);
		break;
	case SSPP_RGB0:
		ctx->pending_flush_mask |= BIT(3);
		break;
	case SSPP_RGB1:
		ctx->pending_flush_mask |= BIT(4);
		break;
	case SSPP_RGB2:
		ctx->pending_flush_mask |= BIT(5);
		break;
	case SSPP_RGB3:
		ctx->pending_flush_mask |= BIT(19);
		break;
	case SSPP_DMA0:
		ctx->pending_flush_mask |= BIT(11);
		break;
	case SSPP_DMA1:
		ctx->pending_flush_mask |= BIT(12);
		break;
	case SSPP_DMA2:
		ctx->pending_flush_mask |= BIT(24);
		break;
	case SSPP_DMA3:
		ctx->pending_flush_mask |= BIT(25);
		break;
	case SSPP_DMA4:
		ctx->pending_flush_mask |= BIT(13);
		break;
	case SSPP_DMA5:
		ctx->pending_flush_mask |= BIT(14);
		break;
	case SSPP_CURSOR0:
		ctx->pending_flush_mask |= BIT(22);
		break;
	case SSPP_CURSOR1:
		ctx->pending_flush_mask |= BIT(23);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm)
{
	switch (lm) {
	case LM_0:
		ctx->pending_flush_mask |= BIT(6);
		break;
	case LM_1:
		ctx->pending_flush_mask |= BIT(7);
		break;
	case LM_2:
		ctx->pending_flush_mask |= BIT(8);
		break;
	case LM_3:
		ctx->pending_flush_mask |= BIT(9);
		break;
	case LM_4:
		ctx->pending_flush_mask |= BIT(10);
		break;
	case LM_5:
		ctx->pending_flush_mask |= BIT(20);
		break;
	case LM_6:
		ctx->pending_flush_mask |= BIT(21);
		break;
	case LM_7:
		ctx->pending_flush_mask |= BIT(27);
		break;
	default:
		break;
	}

	ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
}

static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		ctx->pending_flush_mask |= BIT(31);
		break;
	case INTF_1:
		ctx->pending_flush_mask |= BIT(30);
		break;
	case INTF_2:
		ctx->pending_flush_mask |= BIT(29);
		break;
	case INTF_3:
		ctx->pending_flush_mask |= BIT(28);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	switch (wb) {
	case WB_0:
	case WB_1:
	case WB_2:
		ctx->pending_flush_mask |= BIT(WB_IDX);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_cdm(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num)
{
	/* update pending flush only if CDM_0 is flushed */
	if (cdm_num == CDM_0)
		ctx->pending_flush_mask |= BIT(CDM_IDX);
}

static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
	ctx->pending_flush_mask |= BIT(WB_IDX);
}

static void dpu_hw_ctl_update_pending_flush_cwb_v1(struct dpu_hw_ctl *ctx,
		enum dpu_cwb cwb)
{
	ctx->pending_cwb_flush_mask |= BIT(cwb - CWB_0);
	ctx->pending_flush_mask |= BIT(CWB_IDX);
}

static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(INTF_IDX);
}

static void dpu_hw_ctl_update_pending_flush_periph_v1(struct dpu_hw_ctl *ctx,
						      enum dpu_intf intf)
{
	ctx->pending_periph_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(PERIPH_IDX);
}

static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
		enum dpu_merge_3d merge_3d)
{
	ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}

static void dpu_hw_ctl_update_pending_flush_dsc_v1(struct dpu_hw_ctl *ctx,
						   enum dpu_dsc dsc_num)
{
	ctx->pending_dsc_flush_mask |= BIT(dsc_num - DSC_0);
	ctx->pending_flush_mask |= BIT(DSC_IDX);
}

static void dpu_hw_ctl_update_pending_flush_cdm_v1(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num)
{
	ctx->pending_cdm_flush_mask |= BIT(cdm_num - CDM_0);
	ctx->pending_flush_mask |= BIT(CDM_IDX);
}

static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
	enum dpu_dspp dspp, u32 dspp_sub_blk)
{
	switch (dspp) {
	case DSPP_0:
		ctx->pending_flush_mask |= BIT(13);
		break;
	case DSPP_1:
		ctx->pending_flush_mask |= BIT(14);
		break;
	case DSPP_2:
		ctx->pending_flush_mask |= BIT(15);
		break;
	case DSPP_3:
		ctx->pending_flush_mask |= BIT(21);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_dspp_sub_blocks(
	struct dpu_hw_ctl *ctx, enum dpu_dspp dspp, u32 dspp_sub_blk)
{
	if (dspp >= DSPP_MAX)
		return;

	switch (dspp_sub_blk) {
	case DPU_DSPP_PCC:
		ctx->pending_dspp_flush_mask[dspp - DSPP_0] |= BIT(4);
		break;
	default:
		return;
	}

	ctx->pending_flush_mask |= BIT(DSPP_IDX);
}

static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * It takes around 30us for the MDP to finish resetting its CTL path;
	 * poll every 20-50us so that the reset should be complete by the
	 * first poll.
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;

		DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
	}

	DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
}

struct ctl_blend_config {
	int idx, shift, ext_shift;
};

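/*
 * Per-SSPP blend stage field layout: @idx selects the mixercfg word written
 * by dpu_hw_ctl_setup_blendstage() (0 = CTL_LAYER, 1 = CTL_LAYER_EXT,
 * 2/3/4 = CTL_LAYER_EXT2/3/4), @shift is the bit offset of the stage value
 * and @ext_shift the offset of the extra stage bit in CTL_LAYER_EXT. The
 * second entry of each pair is used when the pipe is staged as
 * DPU_SSPP_RECT_1; idx == -1 marks an unsupported combination.
 */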
static const struct ctl_blend_config ctl_blend_config[][2] = {
	[SSPP_NONE] = { { -1 }, { -1 } },
	[SSPP_MAX] =  { { -1 }, { -1 } },
	[SSPP_VIG0] = { { 0, 0,  0  }, { 3, 0 } },
	[SSPP_VIG1] = { { 0, 3,  2  }, { 3, 4 } },
	[SSPP_VIG2] = { { 0, 6,  4  }, { 3, 8 } },
	[SSPP_VIG3] = { { 0, 26, 6  }, { 3, 12 } },
	[SSPP_RGB0] = { { 0, 9,  8  }, { -1 } },
	[SSPP_RGB1] = { { 0, 12, 10 }, { -1 } },
	[SSPP_RGB2] = { { 0, 15, 12 }, { -1 } },
	[SSPP_RGB3] = { { 0, 29, 14 }, { -1 } },
	[SSPP_DMA0] = { { 0, 18, 16 }, { 2, 8 } },
	[SSPP_DMA1] = { { 0, 21, 18 }, { 2, 12 } },
	[SSPP_DMA2] = { { 2, 0      }, { 2, 16 } },
	[SSPP_DMA3] = { { 2, 4      }, { 2, 20 } },
	[SSPP_DMA4] = { { 4, 0      }, { 4, 8 } },
	[SSPP_DMA5] = { { 4, 4      }, { 4, 12 } },
	[SSPP_CURSOR0] =  { { 1, 20 }, { -1 } },
	[SSPP_CURSOR1] =  { { 1, 26 }, { -1 } },
};

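/*
 * Program the blend stages of one layer mixer: mixercfg[0..4] accumulate the
 * values for CTL_LAYER and CTL_LAYER_EXT/EXT2/EXT3/EXT4 respectively, with
 * BORDER_OUT always enabled. A NULL stage_cfg leaves only BORDER_OUT set,
 * effectively clearing the mixer configuration.
 */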
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mix, ext, mix_ext;
	u32 mixercfg[5] = { 0 };
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg[0] = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;
		mix_ext = (i + 1) & 0xf;

		for (j = 0; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];
			enum dpu_sspp pipe = stage_cfg->stage[i][j];
			const struct ctl_blend_config *cfg =
				&ctl_blend_config[pipe][rect_index == DPU_SSPP_RECT_1];

			/*
			 * CTL_LAYER has 3-bit fields (with an extra bit each
			 * in the EXT register); all EXT registers have 4-bit
			 * fields.
			 */
			if (cfg->idx == -1) {
				continue;
			} else if (cfg->idx == 0) {
				mixercfg[0] |= mix << cfg->shift;
				mixercfg[1] |= ext << cfg->ext_shift;
			} else {
				mixercfg[cfg->idx] |= mix_ext << cfg->shift;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg[0]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg[1]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg[2]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg[3]);
	if ((test_bit(DPU_CTL_HAS_LAYER_EXT4, &ctx->caps->features)))
		DPU_REG_WRITE(c, CTL_LAYER_EXT4(lm), mixercfg[4]);
}

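/*
 * Note that the *_ACTIVE registers are updated read-modify-write: the
 * current value is read back and only the bits for the blocks in @cfg are
 * OR'd in, so bits programmed by an earlier call for another interface or
 * writeback on this CTL remain set.
 */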
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 dsc_active = 0;
	u32 wb_active = 0;
	u32 cwb_active = 0;
	u32 mode_sel = 0;
	u32 merge_3d_active = 0;

	/* CTL_TOP[31:28] carries a group_id used to collate CTL paths
	 * per VM. Explicitly disable it until VM support is added in SW,
	 * since the power-on reset value does not disable it.
	 */
	if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
		mode_sel = CTL_DEFAULT_GROUP_ID << 28;

	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);

	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
	wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
	cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE);
	dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
	merge_3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);

	if (cfg->intf)
		intf_active |= BIT(cfg->intf - INTF_0);

	if (cfg->wb)
		wb_active |= BIT(cfg->wb - WB_0);

	if (cfg->cwb)
		cwb_active |= cfg->cwb;

	if (cfg->dsc)
		dsc_active |= cfg->dsc;

	if (cfg->merge_3d)
		merge_3d_active |= BIT(cfg->merge_3d - MERGE_3D_0);

	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
	DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
	DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
	DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, merge_3d_active);

	if (cfg->intf_master)
		DPU_REG_WRITE(c, CTL_INTF_MASTER, BIT(cfg->intf_master - INTF_0));

	if (cfg->cdm)
		DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm);
}

static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	if (cfg->wb)
		intf_cfg |= (cfg->wb & 0x3) + 2;

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}

static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 intf_master = 0;
	u32 wb_active = 0;
	u32 cwb_active = 0;
	u32 merge3d_active = 0;
	u32 dsc_active;
	u32 cdm_active;

	/*
	 * This function resets each portion of the CTL path: it clears the
	 * SSPPs staged on the LM, the merge_3d block, interfaces, writeback
	 * etc. to ensure a clean teardown of the pipeline. It is used for
	 * writeback to begin with, to get a proper teardown of the writeback
	 * session, but upon further validation it can be extended to all
	 * interfaces.
	 */
	if (cfg->merge_3d) {
		merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
		merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
				merge3d_active);
	}

	dpu_hw_ctl_clear_all_blendstages(ctx);

	if (ctx->ops.set_active_fetch_pipes)
		ctx->ops.set_active_fetch_pipes(ctx, NULL);

	if (cfg->intf) {
		intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
		intf_active &= ~BIT(cfg->intf - INTF_0);
		DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);

		intf_master = DPU_REG_READ(c, CTL_INTF_MASTER);

		/* Unset this intf as master, if it is the current master */
		if (intf_master == BIT(cfg->intf - INTF_0)) {
			DPU_DEBUG_DRIVER("Unsetting INTF_%d master\n", cfg->intf - INTF_0);
			DPU_REG_WRITE(c, CTL_INTF_MASTER, 0);
		}
	}

	if (cfg->cwb) {
		cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE);
		cwb_active &= ~cfg->cwb;
		DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
	}

	if (cfg->wb) {
		wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
		wb_active &= ~BIT(cfg->wb - WB_0);
		DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
	}

	if (cfg->dsc) {
		dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
		dsc_active &= ~cfg->dsc;
		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
	}

	if (cfg->cdm) {
		cdm_active = DPU_REG_READ(c, CTL_CDM_ACTIVE);
		cdm_active &= ~cfg->cdm;
		DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cdm_active);
	}
}

static void dpu_hw_ctl_set_active_fetch_pipes(struct dpu_hw_ctl *ctx,
					      unsigned long *fetch_active)
{
	int i;
	u32 val = 0;

	if (fetch_active) {
		for (i = 0; i < SSPP_MAX; i++) {
			if (test_bit(i, fetch_active) &&
				fetch_tbl[i] != CTL_INVALID_BIT)
				val |= BIT(fetch_tbl[i]);
		}
	}

	DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}

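/*
 * Select the ops for this CTL based on its capabilities: hardware with
 * DPU_CTL_ACTIVE_CFG uses the _v1 variants that program the dedicated
 * *_ACTIVE and per-block flush registers, while older hardware uses the
 * legacy CTL_TOP/CTL_FLUSH-only paths.
 */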
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
		unsigned long cap)
{
	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
		ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf_v1;

		ops->update_pending_flush_periph =
			dpu_hw_ctl_update_pending_flush_periph_v1;

		ops->update_pending_flush_merge_3d =
			dpu_hw_ctl_update_pending_flush_merge_3d_v1;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
		ops->update_pending_flush_cwb = dpu_hw_ctl_update_pending_flush_cwb_v1;
		ops->update_pending_flush_dsc =
			dpu_hw_ctl_update_pending_flush_dsc_v1;
		ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
	} else {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
		ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm;
	}
	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
	ops->trigger_start = dpu_hw_ctl_trigger_start;
	ops->is_started = dpu_hw_ctl_is_started;
	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
	ops->reset = dpu_hw_ctl_reset_control;
	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
	ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
	ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
	if (cap & BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp_sub_blocks;
	else
		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;

	if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
		ops->set_active_fetch_pipes = dpu_hw_ctl_set_active_fetch_pipes;
}

/**
 * dpu_hw_ctl_init() - Initialize the ctl_path hw driver object.
 * @dev:  Corresponding device for devres management
 * @cfg:  ctl_path catalog entry for which driver object is required
 * @addr: mapped register I/O address of the MDP
 * @mixer_count: Number of mixers in @mixer
 * @mixer: Pointer to an array of Layer Mixers defined in the catalog
 *
 * Should be called before accessing any ctl_path register.
 *
 * Return: Pointer to the allocated dpu_hw_ctl on success, ERR_PTR on failure.
 */
struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
				   const struct dpu_ctl_cfg *cfg,
				   void __iomem *addr,
				   u32 mixer_count,
				   const struct dpu_lm_cfg *mixer)
{
	struct dpu_hw_ctl *c;

	c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	c->hw.blk_addr = addr + cfg->base;
	c->hw.log_mask = DPU_DBG_MASK_CTL;

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = cfg->id;
	c->mixer_count = mixer_count;
	c->mixer_hw_caps = mixer;

	return c;
}
822