xref: /linux/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c (revision f5db8841ebe59dbdf07fda797c88ccb51e0c893d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  */
5 
6 #include <linux/delay.h>
7 
8 #include <drm/drm_managed.h>
9 
10 #include "dpu_hwio.h"
11 #include "dpu_hw_ctl.h"
12 #include "dpu_kms.h"
13 #include "dpu_trace.h"
14 
15 #define   CTL_LAYER(lm)                 \
16 	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
17 #define   CTL_LAYER_EXT(lm)             \
18 	(0x40 + (((lm) - LM_0) * 0x004))
19 #define   CTL_LAYER_EXT2(lm)             \
20 	(0x70 + (((lm) - LM_0) * 0x004))
21 #define   CTL_LAYER_EXT3(lm)             \
22 	(0xA0 + (((lm) - LM_0) * 0x004))
23 #define CTL_LAYER_EXT4(lm)             \
24 	(0xB8 + (((lm) - LM_0) * 0x004))
25 #define   CTL_TOP                       0x014
26 #define   CTL_FLUSH                     0x018
27 #define   CTL_START                     0x01C
28 #define   CTL_PREPARE                   0x0d0
29 #define   CTL_SW_RESET                  0x030
30 #define   CTL_LAYER_EXTN_OFFSET         0x40
31 #define   CTL_MERGE_3D_ACTIVE           0x0E4
32 #define   CTL_DSC_ACTIVE                0x0E8
33 #define   CTL_WB_ACTIVE                 0x0EC
34 #define   CTL_INTF_ACTIVE               0x0F4
35 #define   CTL_CDM_ACTIVE                0x0F8
36 #define   CTL_FETCH_PIPE_ACTIVE         0x0FC
37 #define   CTL_MERGE_3D_FLUSH            0x100
38 #define   CTL_DSC_FLUSH                0x104
39 #define   CTL_WB_FLUSH                  0x108
40 #define   CTL_INTF_FLUSH                0x110
41 #define   CTL_CDM_FLUSH                0x114
42 #define   CTL_INTF_MASTER               0x134
43 #define   CTL_DSPP_n_FLUSH(n)           ((0x13C) + ((n) * 4))
44 
45 #define CTL_MIXER_BORDER_OUT            BIT(24)
46 #define CTL_FLUSH_MASK_CTL              BIT(17)
47 
48 #define DPU_REG_RESET_TIMEOUT_US        2000
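/*
 * Bit positions in CTL_FLUSH that gate the corresponding per-block
 * *_FLUSH registers on active-CTL (v1) hardware.
 */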
49 #define  MERGE_3D_IDX   23
50 #define  DSC_IDX        22
51 #define CDM_IDX         26
52 #define  INTF_IDX       31
53 #define WB_IDX          16
54 #define  DSPP_IDX       29  /* From DPU hw rev 7.x.x */
55 #define CTL_INVALID_BIT                 0xffff
56 #define CTL_DEFAULT_GROUP_ID		0xf
57 
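/*
 * CTL_FETCH_PIPE_ACTIVE bit position for each pipe, indexed by enum dpu_sspp;
 * CTL_INVALID_BIT marks pipes that have no fetch-active bit.
 */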
58 static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
59 	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
60 	1, 2, 3, 4, 5};
61 
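/*
 * Look up the maximum number of blend stages of the given LM in the mixer
 * catalog passed at init time; returns -EINVAL if the LM is not found.
 */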
62 static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
63 		enum dpu_lm lm)
64 {
65 	int i;
66 	int stages = -EINVAL;
67 
68 	for (i = 0; i < count; i++) {
69 		if (lm == mixer[i].id) {
70 			stages = mixer[i].sblk->maxblendstages;
71 			break;
72 		}
73 	}
74 
75 	return stages;
76 }
77 
78 static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
79 {
80 	struct dpu_hw_blk_reg_map *c = &ctx->hw;
81 
82 	return DPU_REG_READ(c, CTL_FLUSH);
83 }
84 
85 static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
86 {
87 	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
88 				       dpu_hw_ctl_get_flush_register(ctx));
89 	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
90 }
91 
92 static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
93 {
94 	return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
95 }
96 
97 static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
98 {
99 	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
100 					 dpu_hw_ctl_get_flush_register(ctx));
101 	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
102 }
103 
104 static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
105 {
106 	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
107 				     dpu_hw_ctl_get_flush_register(ctx));
108 	ctx->pending_flush_mask = 0x0;
109 	ctx->pending_intf_flush_mask = 0;
110 	ctx->pending_wb_flush_mask = 0;
111 	ctx->pending_merge_3d_flush_mask = 0;
112 	ctx->pending_dsc_flush_mask = 0;
113 	ctx->pending_cdm_flush_mask = 0;
114 
115 	memset(ctx->pending_dspp_flush_mask, 0,
116 		sizeof(ctx->pending_dspp_flush_mask));
117 }
118 
119 static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
120 		u32 flushbits)
121 {
122 	trace_dpu_hw_ctl_update_pending_flush(flushbits,
123 					      ctx->pending_flush_mask);
124 	ctx->pending_flush_mask |= flushbits;
125 }
126 
127 static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
128 {
129 	return ctx->pending_flush_mask;
130 }
131 
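/*
 * Active-CTL (v1) flush: write the pending per-block masks (merge_3d, intf,
 * wb, dspp, dsc, cdm) to their dedicated *_FLUSH registers first, then kick
 * the top-level CTL_FLUSH with the accumulated pending_flush_mask.
 */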
132 static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
133 {
134 	int dspp;
135 
136 	if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
137 		DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
138 				ctx->pending_merge_3d_flush_mask);
139 	if (ctx->pending_flush_mask & BIT(INTF_IDX))
140 		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
141 				ctx->pending_intf_flush_mask);
142 	if (ctx->pending_flush_mask & BIT(WB_IDX))
143 		DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
144 				ctx->pending_wb_flush_mask);
145 
146 	if (ctx->pending_flush_mask & BIT(DSPP_IDX))
147 		for (dspp = DSPP_0; dspp < DSPP_MAX; dspp++) {
148 			if (ctx->pending_dspp_flush_mask[dspp - DSPP_0])
149 				DPU_REG_WRITE(&ctx->hw,
150 				CTL_DSPP_n_FLUSH(dspp - DSPP_0),
151 				ctx->pending_dspp_flush_mask[dspp - DSPP_0]);
152 		}
153 
154 	if (ctx->pending_flush_mask & BIT(DSC_IDX))
155 		DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,
156 			      ctx->pending_dsc_flush_mask);
157 
158 	if (ctx->pending_flush_mask & BIT(CDM_IDX))
159 		DPU_REG_WRITE(&ctx->hw, CTL_CDM_FLUSH,
160 			      ctx->pending_cdm_flush_mask);
161 
162 	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
163 }
164 
165 static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
166 {
167 	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
168 				     dpu_hw_ctl_get_flush_register(ctx));
169 	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
170 }
171 
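/*
 * Each SSPP maps to a fixed bit in CTL_FLUSH; note that the bit numbering
 * is not contiguous across pipe types.
 */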
172 static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
173 	enum dpu_sspp sspp)
174 {
175 	switch (sspp) {
176 	case SSPP_VIG0:
177 		ctx->pending_flush_mask |=  BIT(0);
178 		break;
179 	case SSPP_VIG1:
180 		ctx->pending_flush_mask |= BIT(1);
181 		break;
182 	case SSPP_VIG2:
183 		ctx->pending_flush_mask |= BIT(2);
184 		break;
185 	case SSPP_VIG3:
186 		ctx->pending_flush_mask |= BIT(18);
187 		break;
188 	case SSPP_RGB0:
189 		ctx->pending_flush_mask |= BIT(3);
190 		break;
191 	case SSPP_RGB1:
192 		ctx->pending_flush_mask |= BIT(4);
193 		break;
194 	case SSPP_RGB2:
195 		ctx->pending_flush_mask |= BIT(5);
196 		break;
197 	case SSPP_RGB3:
198 		ctx->pending_flush_mask |= BIT(19);
199 		break;
200 	case SSPP_DMA0:
201 		ctx->pending_flush_mask |= BIT(11);
202 		break;
203 	case SSPP_DMA1:
204 		ctx->pending_flush_mask |= BIT(12);
205 		break;
206 	case SSPP_DMA2:
207 		ctx->pending_flush_mask |= BIT(24);
208 		break;
209 	case SSPP_DMA3:
210 		ctx->pending_flush_mask |= BIT(25);
211 		break;
212 	case SSPP_DMA4:
213 		ctx->pending_flush_mask |= BIT(13);
214 		break;
215 	case SSPP_DMA5:
216 		ctx->pending_flush_mask |= BIT(14);
217 		break;
218 	case SSPP_CURSOR0:
219 		ctx->pending_flush_mask |= BIT(22);
220 		break;
221 	case SSPP_CURSOR1:
222 		ctx->pending_flush_mask |= BIT(23);
223 		break;
224 	default:
225 		break;
226 	}
227 }
228 
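/*
 * Flushing a mixer also requires flushing the CTL block itself, hence the
 * unconditional CTL_FLUSH_MASK_CTL (bit 17) at the end.
 */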
229 static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
230 	enum dpu_lm lm)
231 {
232 	switch (lm) {
233 	case LM_0:
234 		ctx->pending_flush_mask |= BIT(6);
235 		break;
236 	case LM_1:
237 		ctx->pending_flush_mask |= BIT(7);
238 		break;
239 	case LM_2:
240 		ctx->pending_flush_mask |= BIT(8);
241 		break;
242 	case LM_3:
243 		ctx->pending_flush_mask |= BIT(9);
244 		break;
245 	case LM_4:
246 		ctx->pending_flush_mask |= BIT(10);
247 		break;
248 	case LM_5:
249 		ctx->pending_flush_mask |= BIT(20);
250 		break;
251 	default:
252 		break;
253 	}
254 
255 	ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
256 }
257 
258 static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
259 		enum dpu_intf intf)
260 {
261 	switch (intf) {
262 	case INTF_0:
263 		ctx->pending_flush_mask |= BIT(31);
264 		break;
265 	case INTF_1:
266 		ctx->pending_flush_mask |= BIT(30);
267 		break;
268 	case INTF_2:
269 		ctx->pending_flush_mask |= BIT(29);
270 		break;
271 	case INTF_3:
272 		ctx->pending_flush_mask |= BIT(28);
273 		break;
274 	default:
275 		break;
276 	}
277 }
278 
279 static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
280 		enum dpu_wb wb)
281 {
282 	switch (wb) {
283 	case WB_0:
284 	case WB_1:
285 	case WB_2:
286 		ctx->pending_flush_mask |= BIT(WB_IDX);
287 		break;
288 	default:
289 		break;
290 	}
291 }
292 
293 static void dpu_hw_ctl_update_pending_flush_cdm(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num)
294 {
295 	/* update pending flush only if CDM_0 is flushed */
296 	if (cdm_num == CDM_0)
297 		ctx->pending_flush_mask |= BIT(CDM_IDX);
298 }
299 
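/*
 * The *_v1 helpers below track two levels of flush state: the per-instance
 * bit (relative to the first instance, e.g. wb - WB_0) in the dedicated
 * pending_*_flush_mask, plus the aggregate block bit in pending_flush_mask.
 * Both are written out by dpu_hw_ctl_trigger_flush_v1().
 */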
300 static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
301 		enum dpu_wb wb)
302 {
303 	ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
304 	ctx->pending_flush_mask |= BIT(WB_IDX);
305 }
306 
307 static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
308 		enum dpu_intf intf)
309 {
310 	ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
311 	ctx->pending_flush_mask |= BIT(INTF_IDX);
312 }
313 
314 static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
315 		enum dpu_merge_3d merge_3d)
316 {
317 	ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
318 	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
319 }
320 
321 static void dpu_hw_ctl_update_pending_flush_dsc_v1(struct dpu_hw_ctl *ctx,
322 						   enum dpu_dsc dsc_num)
323 {
324 	ctx->pending_dsc_flush_mask |= BIT(dsc_num - DSC_0);
325 	ctx->pending_flush_mask |= BIT(DSC_IDX);
326 }
327 
328 static void dpu_hw_ctl_update_pending_flush_cdm_v1(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num)
329 {
330 	ctx->pending_cdm_flush_mask |= BIT(cdm_num - CDM_0);
331 	ctx->pending_flush_mask |= BIT(CDM_IDX);
332 }
333 
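/*
 * Used when DPU_CTL_DSPP_SUB_BLOCK_FLUSH is not advertised: each DSPP has
 * its own bit directly in CTL_FLUSH. With sub-block flush support the
 * variant below programs the per-DSPP CTL_DSPP_n_FLUSH registers instead.
 */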
334 static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
335 	enum dpu_dspp dspp, u32 dspp_sub_blk)
336 {
337 	switch (dspp) {
338 	case DSPP_0:
339 		ctx->pending_flush_mask |= BIT(13);
340 		break;
341 	case DSPP_1:
342 		ctx->pending_flush_mask |= BIT(14);
343 		break;
344 	case DSPP_2:
345 		ctx->pending_flush_mask |= BIT(15);
346 		break;
347 	case DSPP_3:
348 		ctx->pending_flush_mask |= BIT(21);
349 		break;
350 	default:
351 		break;
352 	}
353 }
354 
355 static void dpu_hw_ctl_update_pending_flush_dspp_sub_blocks(
356 	struct dpu_hw_ctl *ctx,	enum dpu_dspp dspp, u32 dspp_sub_blk)
357 {
358 	if (dspp >= DSPP_MAX)
359 		return;
360 
361 	switch (dspp_sub_blk) {
362 	case DPU_DSPP_PCC:
363 		ctx->pending_dspp_flush_mask[dspp - DSPP_0] |= BIT(4);
364 		break;
365 	default:
366 		return;
367 	}
368 
369 	ctx->pending_flush_mask |= BIT(DSPP_IDX);
370 }
371 
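/*
 * Poll CTL_SW_RESET until the self-clearing reset bit drops or timeout_us
 * elapses; returns the last observed status (non-zero means the reset is
 * still pending, i.e. a timeout).
 */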
372 static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
373 {
374 	struct dpu_hw_blk_reg_map *c = &ctx->hw;
375 	ktime_t timeout;
376 	u32 status;
377 
378 	timeout = ktime_add_us(ktime_get(), timeout_us);
379 
380 	/*
381 	 * It takes around 30us for the MDP to finish resetting its CTL path;
382 	 * polling every 20-50us means the reset should be done by the first poll.
383 	 */
384 	do {
385 		status = DPU_REG_READ(c, CTL_SW_RESET);
386 		status &= 0x1;
387 		if (status)
388 			usleep_range(20, 50);
389 	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);
390 
391 	return status;
392 }
393 
394 static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
395 {
396 	struct dpu_hw_blk_reg_map *c = &ctx->hw;
397 
398 	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
399 	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
400 	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
401 		return -EINVAL;
402 
403 	return 0;
404 }
405 
406 static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
407 {
408 	struct dpu_hw_blk_reg_map *c = &ctx->hw;
409 	u32 status;
410 
411 	status = DPU_REG_READ(c, CTL_SW_RESET);
412 	status &= 0x01;
413 	if (!status)
414 		return 0;
415 
416 	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
417 	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
418 		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
419 		return -EINVAL;
420 	}
421 
422 	return 0;
423 }
424 
425 static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
426 {
427 	struct dpu_hw_blk_reg_map *c = &ctx->hw;
428 	int i;
429 
430 	for (i = 0; i < ctx->mixer_count; i++) {
431 		enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;
432 
433 		DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
434 		DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
435 		DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
436 		DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
437 	}
438 
439 	DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
440 }
441 
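/*
 * Per-SSPP placement inside the CTL_LAYER/CTL_LAYER_EXT* registers:
 * idx selects the mixercfg word (0 = CTL_LAYER, 1 = EXT, 2 = EXT2, ...),
 * shift is the bit offset of the stage value, and ext_shift (for idx 0)
 * is the offset of the overflow bit in CTL_LAYER_EXT. The second array
 * entry describes RECT_1 of multirect-capable pipes; idx == -1 means the
 * pipe (or rect) cannot be staged through this register set.
 */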
442 struct ctl_blend_config {
443 	int idx, shift, ext_shift;
444 };
445 
446 static const struct ctl_blend_config ctl_blend_config[][2] = {
447 	[SSPP_NONE] = { { -1 }, { -1 } },
448 	[SSPP_MAX] =  { { -1 }, { -1 } },
449 	[SSPP_VIG0] = { { 0, 0,  0  }, { 3, 0 } },
450 	[SSPP_VIG1] = { { 0, 3,  2  }, { 3, 4 } },
451 	[SSPP_VIG2] = { { 0, 6,  4  }, { 3, 8 } },
452 	[SSPP_VIG3] = { { 0, 26, 6  }, { 3, 12 } },
453 	[SSPP_RGB0] = { { 0, 9,  8  }, { -1 } },
454 	[SSPP_RGB1] = { { 0, 12, 10 }, { -1 } },
455 	[SSPP_RGB2] = { { 0, 15, 12 }, { -1 } },
456 	[SSPP_RGB3] = { { 0, 29, 14 }, { -1 } },
457 	[SSPP_DMA0] = { { 0, 18, 16 }, { 2, 8 } },
458 	[SSPP_DMA1] = { { 0, 21, 18 }, { 2, 12 } },
459 	[SSPP_DMA2] = { { 2, 0      }, { 2, 16 } },
460 	[SSPP_DMA3] = { { 2, 4      }, { 2, 20 } },
461 	[SSPP_DMA4] = { { 4, 0      }, { 4, 8 } },
462 	[SSPP_DMA5] = { { 4, 4      }, { 4, 12 } },
463 	[SSPP_CURSOR0] =  { { 1, 20 }, { -1 } },
464 	[SSPP_CURSOR1] =  { { 1, 26 }, { -1 } },
465 };
466 
467 static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
468 	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
469 {
470 	struct dpu_hw_blk_reg_map *c = &ctx->hw;
471 	u32 mix, ext, mix_ext;
472 	u32 mixercfg[5] = { 0 };
473 	int i, j;
474 	int stages;
475 	int pipes_per_stage;
476 
477 	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
478 	if (stages < 0)
479 		return;
480 
481 	if (test_bit(DPU_MIXER_SOURCESPLIT,
482 		&ctx->mixer_hw_caps->features))
483 		pipes_per_stage = PIPES_PER_STAGE;
484 	else
485 		pipes_per_stage = 1;
486 
487 	mixercfg[0] = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
488 
489 	if (!stage_cfg)
490 		goto exit;
491 
492 	for (i = 0; i <= stages; i++) {
493 		/* overflow to ext register if 'i + 1 > 7' */
494 		mix = (i + 1) & 0x7;
495 		ext = i >= 7;
496 		mix_ext = (i + 1) & 0xf;
497 
498 		for (j = 0 ; j < pipes_per_stage; j++) {
499 			enum dpu_sspp_multirect_index rect_index =
500 				stage_cfg->multirect_index[i][j];
501 			enum dpu_sspp pipe = stage_cfg->stage[i][j];
502 			const struct ctl_blend_config *cfg =
503 				&ctl_blend_config[pipe][rect_index == DPU_SSPP_RECT_1];
504 
505 			/*
506 			 * CTL_LAYER has a 3-bit field (plus an extra bit in the EXT register),
507 			 * while all EXT registers have 4-bit fields.
508 			 */
509 			if (cfg->idx == -1) {
510 				continue;
511 			} else if (cfg->idx == 0) {
512 				mixercfg[0] |= mix << cfg->shift;
513 				mixercfg[1] |= ext << cfg->ext_shift;
514 			} else {
515 				mixercfg[cfg->idx] |= mix_ext << cfg->shift;
516 			}
517 		}
518 	}
519 
520 exit:
521 	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg[0]);
522 	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg[1]);
523 	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg[2]);
524 	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg[3]);
525 	if ((test_bit(DPU_CTL_HAS_LAYER_EXT4, &ctx->caps->features)))
526 		DPU_REG_WRITE(c, CTL_LAYER_EXT4(lm), mixercfg[4]);
527 }
528 
529 
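/*
 * Active-CTL (v1) interface configuration: OR the new interface/writeback
 * into the already-programmed CTL_INTF_ACTIVE/CTL_WB_ACTIVE masks, select
 * command vs. video mode in CTL_TOP and activate the optional merge_3d,
 * DSC and CDM blocks.
 */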
530 static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
531 		struct dpu_hw_intf_cfg *cfg)
532 {
533 	struct dpu_hw_blk_reg_map *c = &ctx->hw;
534 	u32 intf_active = 0;
535 	u32 wb_active = 0;
536 	u32 mode_sel = 0;
537 
538 	/* CTL_TOP[31:28] carries group_id to collate CTL paths
539 	 * per VM. Explicitly disable it until VM support is
540 	 * added in SW; the power-on reset value is not the disabled state.
541 	 */
542 	if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
543 		mode_sel = CTL_DEFAULT_GROUP_ID  << 28;
544 
545 	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
546 		mode_sel |= BIT(17);
547 
548 	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
549 	wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
550 
551 	if (cfg->intf)
552 		intf_active |= BIT(cfg->intf - INTF_0);
553 
554 	if (cfg->wb)
555 		wb_active |= BIT(cfg->wb - WB_0);
556 
557 	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
558 	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
559 	DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
560 
561 	if (cfg->merge_3d)
562 		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
563 			      BIT(cfg->merge_3d - MERGE_3D_0));
564 
565 	if (cfg->dsc)
566 		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
567 
568 	if (cfg->cdm)
569 		DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm);
570 }
571 
572 static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
573 		struct dpu_hw_intf_cfg *cfg)
574 {
575 	struct dpu_hw_blk_reg_map *c = &ctx->hw;
576 	u32 intf_cfg = 0;
577 
578 	intf_cfg |= (cfg->intf & 0xF) << 4;
579 
580 	if (cfg->mode_3d) {
581 		intf_cfg |= BIT(19);
582 		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
583 	}
584 
585 	if (cfg->wb)
586 		intf_cfg |= (cfg->wb & 0x3) + 2;
587 
588 	switch (cfg->intf_mode_sel) {
589 	case DPU_CTL_MODE_SEL_VID:
590 		intf_cfg &= ~BIT(17);
591 		intf_cfg &= ~(0x3 << 15);
592 		break;
593 	case DPU_CTL_MODE_SEL_CMD:
594 		intf_cfg |= BIT(17);
595 		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
596 		break;
597 	default:
598 		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
599 		return;
600 	}
601 
602 	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
603 }
604 
605 static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
606 		struct dpu_hw_intf_cfg *cfg)
607 {
608 	struct dpu_hw_blk_reg_map *c = &ctx->hw;
609 	u32 intf_active = 0;
610 	u32 wb_active = 0;
611 	u32 merge3d_active = 0;
612 	u32 dsc_active;
613 	u32 cdm_active;
614 
615 	/*
616 	 * This function resets each portion of the CTL path: it clears the
617 	 * SSPPs staged on the LMs and deactivates the merge_3d block,
618 	 * interfaces, writeback etc. to ensure a clean teardown of the pipeline.
619 	 * It is used for writeback to begin with, to get a proper teardown of
620 	 * the writeback session, but upon further validation it can be
621 	 * extended to all interfaces.
622 	 */
623 	if (cfg->merge_3d) {
624 		merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
625 		merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
626 		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
627 				merge3d_active);
628 	}
629 
630 	dpu_hw_ctl_clear_all_blendstages(ctx);
631 
632 	if (cfg->intf) {
633 		intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
634 		intf_active &= ~BIT(cfg->intf - INTF_0);
635 		DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
636 	}
637 
638 	if (cfg->wb) {
639 		wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
640 		wb_active &= ~BIT(cfg->wb - WB_0);
641 		DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
642 	}
643 
644 	if (cfg->dsc) {
645 		dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
646 		dsc_active &= ~cfg->dsc;
647 		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
648 	}
649 
650 	if (cfg->cdm) {
651 		cdm_active = DPU_REG_READ(c, CTL_CDM_ACTIVE);
652 		cdm_active &= ~cfg->cdm;
653 		DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cdm_active);
654 	}
655 }
656 
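/*
 * Translate a bitmap of enum dpu_sspp indices into CTL_FETCH_PIPE_ACTIVE
 * bits via fetch_tbl; a NULL bitmap clears the register.
 */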
657 static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
658 	unsigned long *fetch_active)
659 {
660 	int i;
661 	u32 val = 0;
662 
663 	if (fetch_active) {
664 		for (i = 0; i < SSPP_MAX; i++) {
665 			if (test_bit(i, fetch_active) &&
666 				fetch_tbl[i] != CTL_INVALID_BIT)
667 				val |= BIT(fetch_tbl[i]);
668 		}
669 	}
670 
671 	DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
672 }
673 
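/*
 * Populate the ops table: DPU_CTL_ACTIVE_CFG selects the v1 (active-CTL)
 * flush and interface-config flow, DPU_CTL_DSPP_SUB_BLOCK_FLUSH the per-DSPP
 * flush registers, and DPU_CTL_FETCH_ACTIVE the fetch-pipe programming.
 */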
674 static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
675 		unsigned long cap)
676 {
677 	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
678 		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
679 		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
680 		ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
681 		ops->update_pending_flush_intf =
682 			dpu_hw_ctl_update_pending_flush_intf_v1;
683 		ops->update_pending_flush_merge_3d =
684 			dpu_hw_ctl_update_pending_flush_merge_3d_v1;
685 		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
686 		ops->update_pending_flush_dsc =
687 			dpu_hw_ctl_update_pending_flush_dsc_v1;
688 		ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
689 	} else {
690 		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
691 		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
692 		ops->update_pending_flush_intf =
693 			dpu_hw_ctl_update_pending_flush_intf;
694 		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
695 		ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm;
696 	}
697 	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
698 	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
699 	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
700 	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
701 	ops->trigger_start = dpu_hw_ctl_trigger_start;
702 	ops->is_started = dpu_hw_ctl_is_started;
703 	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
704 	ops->reset = dpu_hw_ctl_reset_control;
705 	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
706 	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
707 	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
708 	ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
709 	ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
710 	if (cap & BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
711 		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp_sub_blocks;
712 	else
713 		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
714 
715 	if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
716 		ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
717 }
718 
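/*
 * Illustrative caller sketch (not taken from this file); variable names such
 * as "ctl_cfg", "mmio", "mixer_count" and "mixer" are placeholders for
 * whatever the resource manager actually passes in:
 *
 *	struct dpu_hw_ctl *ctl;
 *
 *	ctl = dpu_hw_ctl_init(dev, ctl_cfg, mmio, mixer_count, mixer);
 *	if (IS_ERR(ctl))
 *		return PTR_ERR(ctl);
 *
 *	ctl->ops.update_pending_flush_mixer(ctl, LM_0);
 *	ctl->ops.trigger_flush(ctl);
 *	ctl->ops.trigger_start(ctl);
 */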
719 struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
720 				   const struct dpu_ctl_cfg *cfg,
721 				   void __iomem *addr,
722 				   u32 mixer_count,
723 				   const struct dpu_lm_cfg *mixer)
724 {
725 	struct dpu_hw_ctl *c;
726 
727 	c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
728 	if (!c)
729 		return ERR_PTR(-ENOMEM);
730 
731 	c->hw.blk_addr = addr + cfg->base;
732 	c->hw.log_mask = DPU_DBG_MASK_CTL;
733 
734 	c->caps = cfg;
735 	_setup_ctl_ops(&c->ops, c->caps->features);
736 	c->idx = cfg->id;
737 	c->mixer_count = mixer_count;
738 	c->mixer_hw_caps = mixer;
739 
740 	return c;
741 }
742