// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"

#define   CTL_LAYER(lm)                 \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT(lm)             \
	(0x40 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT2(lm)            \
	(0x70 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT3(lm)            \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define   CTL_TOP                       0x014
#define   CTL_FLUSH                     0x018
#define   CTL_START                     0x01C
#define   CTL_SW_RESET                  0x030
#define   CTL_LAYER_EXTN_OFFSET         0x40
#define   CTL_PREPARE                   0x0d0
#define   CTL_MERGE_3D_ACTIVE           0x0E4
#define   CTL_DSC_ACTIVE                0x0E8
#define   CTL_WB_ACTIVE                 0x0EC
#define   CTL_INTF_ACTIVE               0x0F4
#define   CTL_FETCH_PIPE_ACTIVE         0x0FC
#define   CTL_MERGE_3D_FLUSH            0x100
#define   CTL_DSC_FLUSH                 0x104
#define   CTL_WB_FLUSH                  0x108
#define   CTL_INTF_FLUSH                0x110
#define   CTL_INTF_MASTER               0x134

#define CTL_MIXER_BORDER_OUT            BIT(24)
#define CTL_FLUSH_MASK_CTL              BIT(17)

#define DPU_REG_RESET_TIMEOUT_US        2000
#define MERGE_3D_IDX                    23
#define DSC_IDX                         22
#define INTF_IDX                        31
#define WB_IDX                          16
#define CTL_INVALID_BIT                 0xffff
#define CTL_DEFAULT_GROUP_ID            0xf

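/*
 * Map each SSPP to its bit in CTL_FETCH_PIPE_ACTIVE (CTL_INVALID_BIT
 * means the pipe has no fetch bit). Read in enum dpu_sspp order, the
 * table puts VIG0-VIG3 at bits 16-19 and DMA0-DMA3 at bits 0-3; RGB
 * and cursor pipes are not programmable here.
 */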
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
	1, 2, 3, CTL_INVALID_BIT, CTL_INVALID_BIT};

static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
		const struct dpu_mdss_cfg *m,
		void __iomem *addr,
		struct dpu_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->ctl_count; i++) {
		if (ctl == m->ctl[i].id) {
			b->blk_addr = addr + m->ctl[i].base;
			b->log_mask = DPU_DBG_MASK_CTL;
			return &m->ctl[i];
		}
	}
	/* a catalog lookup failure, not an allocation failure */
	return ERR_PTR(-EINVAL);
}

static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
		enum dpu_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}

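/*
 * A sketch of how the ops below are meant to be combined per commit
 * (the exact call sites live in the encoder/CRTC code):
 *
 *	ctl->ops.update_pending_flush(ctl, flushbits);
 *	ctl->ops.trigger_flush(ctl);    (writes the mask to CTL_FLUSH)
 *	ctl->ops.trigger_start(ctl);    (kicks CTL_START)
 */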
static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
{
	return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
}

static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
}

static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}

static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	return ctx->pending_flush_mask;
}

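/*
 * On DPU_CTL_ACTIVE_CFG hardware, flushing is two-level: the dedicated
 * MERGE_3D/INTF/WB flush registers carry the per-block bits, while the
 * matching index bit in CTL_FLUSH tells the hardware to evaluate them.
 */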
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
	if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
				ctx->pending_merge_3d_flush_mask);
	if (ctx->pending_flush_mask & BIT(INTF_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
				ctx->pending_intf_flush_mask);
	if (ctx->pending_flush_mask & BIT(WB_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
				ctx->pending_wb_flush_mask);

	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
	enum dpu_sspp sspp)
{
	uint32_t flushbits = 0;

	switch (sspp) {
	case SSPP_VIG0:
		flushbits = BIT(0);
		break;
	case SSPP_VIG1:
		flushbits = BIT(1);
		break;
	case SSPP_VIG2:
		flushbits = BIT(2);
		break;
	case SSPP_VIG3:
		flushbits = BIT(18);
		break;
	case SSPP_RGB0:
		flushbits = BIT(3);
		break;
	case SSPP_RGB1:
		flushbits = BIT(4);
		break;
	case SSPP_RGB2:
		flushbits = BIT(5);
		break;
	case SSPP_RGB3:
		flushbits = BIT(19);
		break;
	case SSPP_DMA0:
		flushbits = BIT(11);
		break;
	case SSPP_DMA1:
		flushbits = BIT(12);
		break;
	case SSPP_DMA2:
		flushbits = BIT(24);
		break;
	case SSPP_DMA3:
		flushbits = BIT(25);
		break;
	case SSPP_CURSOR0:
		flushbits = BIT(22);
		break;
	case SSPP_CURSOR1:
		flushbits = BIT(23);
		break;
	default:
		break;
	}

	return flushbits;
}

static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm)
{
	uint32_t flushbits = 0;

	switch (lm) {
	case LM_0:
		flushbits = BIT(6);
		break;
	case LM_1:
		flushbits = BIT(7);
		break;
	case LM_2:
		flushbits = BIT(8);
		break;
	case LM_3:
		flushbits = BIT(9);
		break;
	case LM_4:
		flushbits = BIT(10);
		break;
	case LM_5:
		flushbits = BIT(20);
		break;
	default:
		/* an unknown mixer contributes no flush bits */
		return 0;
	}

	flushbits |= CTL_FLUSH_MASK_CTL;

	return flushbits;
}

static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		ctx->pending_flush_mask |= BIT(31);
		break;
	case INTF_1:
		ctx->pending_flush_mask |= BIT(30);
		break;
	case INTF_2:
		ctx->pending_flush_mask |= BIT(29);
		break;
	case INTF_3:
		ctx->pending_flush_mask |= BIT(28);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	switch (wb) {
	case WB_0:
	case WB_1:
	case WB_2:
		ctx->pending_flush_mask |= BIT(WB_IDX);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
	ctx->pending_flush_mask |= BIT(WB_IDX);
}

static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(INTF_IDX);
}

static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
		enum dpu_merge_3d merge_3d)
{
	ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}

static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx,
	enum dpu_dspp dspp)
{
	uint32_t flushbits = 0;

	switch (dspp) {
	case DSPP_0:
		flushbits = BIT(13);
		break;
	case DSPP_1:
		flushbits = BIT(14);
		break;
	case DSPP_2:
		flushbits = BIT(15);
		break;
	case DSPP_3:
		flushbits = BIT(21);
		break;
	default:
		return 0;
	}

	return flushbits;
}

static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * The MDP takes around 30us to finish resetting its CTL path, so
	 * sleeping 20-50us between reads means the reset has normally
	 * completed by the first re-poll.
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

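/*
 * Request a SW reset of the CTL path, then poll until the hardware
 * clears the bit again (bounded by DPU_REG_RESET_TIMEOUT_US).
 */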
static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;

		DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
	}

	DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
}

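/*
 * Program the blend stages of one mixer. Each staged pipe gets a 4-bit
 * stage value (stage index + 1, 0 meaning unstaged): the low three
 * bits land in CTL_LAYER and bit 3 in CTL_LAYER_EXT, while REC_1 of a
 * multirect pipe (and REC_0 of DMA2/DMA3) uses a 4-bit field in
 * EXT2/EXT3. For example, VIG0 at stage 7 writes mix = (7 + 1) & 0x7
 * = 0 plus the ext bit, i.e. the 4-bit value 8.
 */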
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* the stage value's bit 3 overflows to the EXT register when i + 1 > 7 */
		mix = (i + 1) & 0x7;
		ext = i >= 7;

		for (j = 0; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];

			switch (stage_cfg->stage[i][j]) {
			case SSPP_VIG0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
				} else {
					mixercfg |= mix << 0;
					mixercfg_ext |= ext << 0;
				}
				break;
			case SSPP_VIG1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
				} else {
					mixercfg |= mix << 3;
					mixercfg_ext |= ext << 2;
				}
				break;
			case SSPP_VIG2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 6;
					mixercfg_ext |= ext << 4;
				}
				break;
			case SSPP_VIG3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 26;
					mixercfg_ext |= ext << 6;
				}
				break;
			case SSPP_RGB0:
				mixercfg |= mix << 9;
				mixercfg_ext |= ext << 8;
				break;
			case SSPP_RGB1:
				mixercfg |= mix << 12;
				mixercfg_ext |= ext << 10;
				break;
			case SSPP_RGB2:
				mixercfg |= mix << 15;
				mixercfg_ext |= ext << 12;
				break;
			case SSPP_RGB3:
				mixercfg |= mix << 29;
				mixercfg_ext |= ext << 14;
				break;
			case SSPP_DMA0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 18;
					mixercfg_ext |= ext << 16;
				}
				break;
			case SSPP_DMA1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 21;
					mixercfg_ext |= ext << 18;
				}
				break;
			case SSPP_DMA2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
				} else {
					/* REC_0 of DMA2/DMA3 also takes a 4-bit
					 * EXT2 field; don't clobber 'mix', which
					 * other pipes in this stage still use.
					 */
					mixercfg_ext2 |= ((i + 1) & 0xF) << 0;
				}
				break;
			case SSPP_DMA3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
				} else {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 4;
				}
				break;
			case SSPP_CURSOR0:
				mixercfg_ext |= ((i + 1) & 0xF) << 20;
				break;
			case SSPP_CURSOR1:
				mixercfg_ext |= ((i + 1) & 0xF) << 26;
				break;
			default:
				break;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}

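/*
 * Active-CTL (v1) interface configuration: instead of a single
 * interface encoded in CTL_TOP, per-block bitmasks are kept in
 * CTL_INTF_ACTIVE/CTL_WB_ACTIVE (read-modify-write below, so several
 * interfaces can be active on one CTL at a time).
 */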
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 wb_active = 0;
	u32 mode_sel = 0;

	/* CTL_TOP[31:28] carries a group_id used to collate CTL paths
	 * per VM. Explicitly disable it until VM support is added in
	 * SW, as the power-on reset value is not the disabled state.
	 */
	if (test_bit(DPU_CTL_VM_CFG, &ctx->caps->features))
		mode_sel = CTL_DEFAULT_GROUP_ID << 28;

	if (cfg->dsc)
		DPU_REG_WRITE(c, CTL_DSC_FLUSH, cfg->dsc);

	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);

	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
	wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);

	if (cfg->intf)
		intf_active |= BIT(cfg->intf - INTF_0);

	if (cfg->wb)
		wb_active |= BIT(cfg->wb - WB_0);

	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);

	if (cfg->merge_3d)
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
			      BIT(cfg->merge_3d - MERGE_3D_0));
	if (cfg->dsc) {
		/* CTL_FLUSH takes a bitmask, so the DSC bit needs BIT() */
		DPU_REG_WRITE(c, CTL_FLUSH, BIT(DSC_IDX));
		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
	}
}

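/*
 * Legacy interface configuration packs everything into CTL_TOP. A
 * rough field map, as read from the code below: interface select in
 * bits [7:4], 3D mux enable in bit 19 with its mode in [21:20],
 * command-mode select in bit 17 and stream select in [16:15].
 */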
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	if (cfg->wb)
		intf_cfg |= (cfg->wb & 0x3) + 2;

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface mode %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}

static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 wb_active = 0;
	u32 merge3d_active = 0;

	/*
	 * This API resets each portion of the CTL path: it clears the
	 * sspps staged on the lm, the merge_3d block, interfaces,
	 * writeback etc., to ensure a clean teardown of the pipeline.
	 * It is used for writeback to begin with, to get a proper
	 * teardown of the writeback session, but upon further
	 * validation it can be extended to all interfaces.
	 */
	if (cfg->merge_3d) {
		merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
		merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
				merge3d_active);
	}

	dpu_hw_ctl_clear_all_blendstages(ctx);

	if (cfg->intf) {
		intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
		intf_active &= ~BIT(cfg->intf - INTF_0);
		DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	}

	if (cfg->wb) {
		wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
		wb_active &= ~BIT(cfg->wb - WB_0);
		DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
	}
}

static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
	unsigned long *fetch_active)
{
	int i;
	u32 val = 0;

	if (fetch_active) {
		for (i = 0; i < SSPP_MAX; i++) {
			if (test_bit(i, fetch_active) &&
				fetch_tbl[i] != CTL_INVALID_BIT)
				val |= BIT(fetch_tbl[i]);
		}
	}

	DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}

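/*
 * Select the op implementations from the CTL feature bits:
 * DPU_CTL_ACTIVE_CFG hardware gets the v1 flush and interface
 * handlers, everything else the legacy ones, and DPU_CTL_FETCH_ACTIVE
 * additionally enables set_active_pipes.
 */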
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
		unsigned long cap)
{
	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
		ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf_v1;
		ops->update_pending_flush_merge_3d =
			dpu_hw_ctl_update_pending_flush_merge_3d_v1;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
	} else {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
	}
	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
	ops->trigger_start = dpu_hw_ctl_trigger_start;
	ops->is_started = dpu_hw_ctl_is_started;
	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
	ops->reset = dpu_hw_ctl_reset_control;
	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
	ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
	ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
	ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp;
	if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
		ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
}

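/*
 * dpu_hw_ctl_init() - look up a CTL block in the catalog and map it.
 * A minimal caller sketch (error handling elided; 'mmio' and 'catalog'
 * stand in for the mapped register base and the dpu_mdss_cfg):
 *
 *	struct dpu_hw_ctl *ctl = dpu_hw_ctl_init(CTL_0, mmio, catalog);
 *
 *	if (IS_ERR(ctl))
 *		return PTR_ERR(ctl);
 *	...
 *	dpu_hw_ctl_destroy(ctl);
 */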
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
		void __iomem *addr,
		const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_ctl *c;
	const struct dpu_ctl_cfg *cfg;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _ctl_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		pr_err("failed to create dpu_hw_ctl %d\n", idx);
		return ERR_PTR(-EINVAL);
	}

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = idx;
	c->mixer_count = m->mixer_count;
	c->mixer_hw_caps = m->mixer;

	return c;
}

void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
{
	kfree(ctx);
}