// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>

#include <drm/drm_managed.h>

#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"

#define   CTL_LAYER(lm)                 \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT(lm)             \
	(0x40 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT2(lm)            \
	(0x70 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT3(lm)            \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT4(lm)            \
	(0xB8 + (((lm) - LM_0) * 0x004))
#define   CTL_TOP                       0x014
#define   CTL_FLUSH                     0x018
#define   CTL_START                     0x01C
#define   CTL_PREPARE                   0x0d0
#define   CTL_SW_RESET                  0x030
#define   CTL_LAYER_EXTN_OFFSET         0x40
#define   CTL_MERGE_3D_ACTIVE           0x0E4
#define   CTL_DSC_ACTIVE                0x0E8
#define   CTL_WB_ACTIVE                 0x0EC
#define   CTL_CWB_ACTIVE                0x0F0
#define   CTL_INTF_ACTIVE               0x0F4
#define   CTL_CDM_ACTIVE                0x0F8
#define   CTL_FETCH_PIPE_ACTIVE         0x0FC
#define   CTL_MERGE_3D_FLUSH            0x100
#define   CTL_DSC_FLUSH                 0x104
#define   CTL_WB_FLUSH                  0x108
#define   CTL_CWB_FLUSH                 0x10C
#define   CTL_INTF_FLUSH                0x110
#define   CTL_CDM_FLUSH                 0x114
#define   CTL_PERIPH_FLUSH              0x128
#define   CTL_PIPE_ACTIVE               0x12c
#define   CTL_LAYER_ACTIVE              0x130
#define   CTL_INTF_MASTER               0x134
#define   CTL_DSPP_n_FLUSH(n)           ((0x13C) + ((n) * 4))

#define CTL_MIXER_BORDER_OUT            BIT(24)
#define CTL_FLUSH_MASK_CTL              BIT(17)

#define DPU_REG_RESET_TIMEOUT_US        2000
#define MERGE_3D_IDX                    23
#define DSC_IDX                         22
#define CDM_IDX                         26
#define PERIPH_IDX                      30
#define INTF_IDX                        31
#define WB_IDX                          16
#define CWB_IDX                         28
#define DSPP_IDX                        29  /* From DPU hw rev 7.x.x */
#define CTL_INVALID_BIT                 0xffff
#define CTL_DEFAULT_GROUP_ID            0xf

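/*
 * Worked example (illustrative; the values follow directly from the macros
 * above): LM_5 is the one layer mixer whose CTL_LAYER register falls outside
 * the contiguous run starting at 0x000, hence the special case:
 *
 *	CTL_LAYER(LM_2)     == (LM_2 - LM_0) * 0x004 == 0x008
 *	CTL_LAYER(LM_5)     == 0x024
 *	CTL_LAYER_EXT(LM_5) == 0x40 + 5 * 0x004      == 0x054
 *
 * Likewise, the *_IDX constants are bit positions inside CTL_FLUSH, so
 * marking e.g. the merge_3d block for flush amounts to:
 *
 *	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);	// bit 23
 */
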
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
	1, 2, 3, 4, 5};

static const u32 lm_tbl[LM_MAX] = {CTL_INVALID_BIT, 0, 1, 2, 3, 4, 5, 6, 7};

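/*
 * Both tables translate a DPU enum value to its bit position in the
 * corresponding ACTIVE register, with CTL_INVALID_BIT marking entries that
 * have no representation there. A minimal sketch of the lookup (this is the
 * loop shape the set_active_* helpers below use):
 *
 *	u32 val = 0;
 *
 *	for (i = 0; i < SSPP_MAX; i++)
 *		if (test_bit(i, active) && fetch_tbl[i] != CTL_INVALID_BIT)
 *			val |= BIT(fetch_tbl[i]);
 *
 * so e.g. SSPP_DMA0 maps to bit 0 and SSPP_VIG0 to bit 16, while the RGB
 * pipes have no fetch bit at all.
 */
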
static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
		enum dpu_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}

static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
{
	return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
}

static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
	ctx->pending_intf_flush_mask = 0;
	ctx->pending_wb_flush_mask = 0;
	ctx->pending_cwb_flush_mask = 0;
	ctx->pending_merge_3d_flush_mask = 0;
	ctx->pending_dsc_flush_mask = 0;
	ctx->pending_cdm_flush_mask = 0;

	memset(ctx->pending_dspp_flush_mask, 0,
		sizeof(ctx->pending_dspp_flush_mask));
}

static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}

static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	return ctx->pending_flush_mask;
}

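/*
 * The helpers above form the software half of the flush protocol: callers
 * accumulate bits with the update_pending_flush_* ops, read them back with
 * get_pending_flush() when needed, and clear them once the frame has been
 * committed. A hedged sketch of the order a caller (e.g. encoder code)
 * might use, not a prescription of the exact upstream call chain:
 *
 *	ctl->ops.update_pending_flush_mixer(ctl, LM_0);
 *	ctl->ops.update_pending_flush_intf(ctl, INTF_1);
 *	ctl->ops.trigger_flush(ctl);		// latch double-buffered regs
 *	ctl->ops.trigger_start(ctl);		// kick off the new frame
 *	ctl->ops.clear_pending_flush(ctl);	// reset the bookkeeping
 */
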
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
	int dspp;

	if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
				ctx->pending_merge_3d_flush_mask);
	if (ctx->pending_flush_mask & BIT(INTF_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
				ctx->pending_intf_flush_mask);
	if (ctx->pending_flush_mask & BIT(WB_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
				ctx->pending_wb_flush_mask);
	if (ctx->pending_flush_mask & BIT(CWB_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_CWB_FLUSH,
				ctx->pending_cwb_flush_mask);

	if (ctx->pending_flush_mask & BIT(DSPP_IDX))
		for (dspp = DSPP_0; dspp < DSPP_MAX; dspp++) {
			if (ctx->pending_dspp_flush_mask[dspp - DSPP_0])
				DPU_REG_WRITE(&ctx->hw,
				CTL_DSPP_n_FLUSH(dspp - DSPP_0),
				ctx->pending_dspp_flush_mask[dspp - DSPP_0]);
		}

	if (ctx->pending_flush_mask & BIT(PERIPH_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_PERIPH_FLUSH,
			      ctx->pending_periph_flush_mask);

	if (ctx->pending_flush_mask & BIT(DSC_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,
			      ctx->pending_dsc_flush_mask);

	if (ctx->pending_flush_mask & BIT(CDM_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_CDM_FLUSH,
			      ctx->pending_cdm_flush_mask);

	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

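/*
 * Illustration of the two-level flush on v1 (DPU >= 5.0) hardware: the
 * per-block registers carry the fine-grained bits and CTL_FLUSH carries
 * only the group bit that arms them. Flushing INTF_1, say, ends up as:
 *
 *	pending_intf_flush_mask = BIT(1);	// INTF_1 - INTF_0
 *	pending_flush_mask |= BIT(INTF_IDX);	// bit 31 of CTL_FLUSH
 *
 * so trigger_flush_v1() writes 0x2 to CTL_INTF_FLUSH before the final
 * CTL_FLUSH write. The legacy trigger_flush() below has no such stage and
 * writes the single mask directly.
 */
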
static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
	enum dpu_sspp sspp)
{
	switch (sspp) {
	case SSPP_VIG0:
		ctx->pending_flush_mask |= BIT(0);
		break;
	case SSPP_VIG1:
		ctx->pending_flush_mask |= BIT(1);
		break;
	case SSPP_VIG2:
		ctx->pending_flush_mask |= BIT(2);
		break;
	case SSPP_VIG3:
		ctx->pending_flush_mask |= BIT(18);
		break;
	case SSPP_RGB0:
		ctx->pending_flush_mask |= BIT(3);
		break;
	case SSPP_RGB1:
		ctx->pending_flush_mask |= BIT(4);
		break;
	case SSPP_RGB2:
		ctx->pending_flush_mask |= BIT(5);
		break;
	case SSPP_RGB3:
		ctx->pending_flush_mask |= BIT(19);
		break;
	case SSPP_DMA0:
		ctx->pending_flush_mask |= BIT(11);
		break;
	case SSPP_DMA1:
		ctx->pending_flush_mask |= BIT(12);
		break;
	case SSPP_DMA2:
		ctx->pending_flush_mask |= BIT(24);
		break;
	case SSPP_DMA3:
		ctx->pending_flush_mask |= BIT(25);
		break;
	case SSPP_DMA4:
		ctx->pending_flush_mask |= BIT(13);
		break;
	case SSPP_DMA5:
		ctx->pending_flush_mask |= BIT(14);
		break;
	case SSPP_CURSOR0:
		ctx->pending_flush_mask |= BIT(22);
		break;
	case SSPP_CURSOR1:
		ctx->pending_flush_mask |= BIT(23);
		break;
	default:
		break;
	}
}

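/*
 * The SSPP flush bits are not contiguous (a hardware layout quirk), so
 * e.g. staging VIG0 and DMA2 in the same commit accumulates:
 *
 *	dpu_hw_ctl_update_pending_flush_sspp(ctx, SSPP_VIG0);	// BIT(0)
 *	dpu_hw_ctl_update_pending_flush_sspp(ctx, SSPP_DMA2);	// BIT(24)
 *	// ctx->pending_flush_mask == 0x01000001
 */
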
static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm)
{
	switch (lm) {
	case LM_0:
		ctx->pending_flush_mask |= BIT(6);
		break;
	case LM_1:
		ctx->pending_flush_mask |= BIT(7);
		break;
	case LM_2:
		ctx->pending_flush_mask |= BIT(8);
		break;
	case LM_3:
		ctx->pending_flush_mask |= BIT(9);
		break;
	case LM_4:
		ctx->pending_flush_mask |= BIT(10);
		break;
	case LM_5:
		ctx->pending_flush_mask |= BIT(20);
		break;
	case LM_6:
		ctx->pending_flush_mask |= BIT(21);
		break;
	case LM_7:
		ctx->pending_flush_mask |= BIT(27);
		break;
	default:
		break;
	}

	ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
}

static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		ctx->pending_flush_mask |= BIT(31);
		break;
	case INTF_1:
		ctx->pending_flush_mask |= BIT(30);
		break;
	case INTF_2:
		ctx->pending_flush_mask |= BIT(29);
		break;
	case INTF_3:
		ctx->pending_flush_mask |= BIT(28);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	switch (wb) {
	case WB_0:
	case WB_1:
	case WB_2:
		ctx->pending_flush_mask |= BIT(WB_IDX);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_cdm(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num)
{
	/* update pending flush only if CDM_0 is flushed */
	if (cdm_num == CDM_0)
		ctx->pending_flush_mask |= BIT(CDM_IDX);
}

static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
	ctx->pending_flush_mask |= BIT(WB_IDX);
}

static void dpu_hw_ctl_update_pending_flush_cwb_v1(struct dpu_hw_ctl *ctx,
		enum dpu_cwb cwb)
{
	ctx->pending_cwb_flush_mask |= BIT(cwb - CWB_0);
	ctx->pending_flush_mask |= BIT(CWB_IDX);
}

static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(INTF_IDX);
}

static void dpu_hw_ctl_update_pending_flush_periph_v1(struct dpu_hw_ctl *ctx,
						      enum dpu_intf intf)
{
	ctx->pending_periph_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(PERIPH_IDX);
}

static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
		enum dpu_merge_3d merge_3d)
{
	ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}

static void dpu_hw_ctl_update_pending_flush_dsc_v1(struct dpu_hw_ctl *ctx,
						   enum dpu_dsc dsc_num)
{
	ctx->pending_dsc_flush_mask |= BIT(dsc_num - DSC_0);
	ctx->pending_flush_mask |= BIT(DSC_IDX);
}

static void dpu_hw_ctl_update_pending_flush_cdm_v1(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num)
{
	ctx->pending_cdm_flush_mask |= BIT(cdm_num - CDM_0);
	ctx->pending_flush_mask |= BIT(CDM_IDX);
}

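/*
 * Every *_v1 helper above follows the same two-step pattern: record the
 * block instance in its dedicated mask, then set the block's group bit in
 * the top-level mask. For instance (values per the *_IDX defines):
 *
 *	dpu_hw_ctl_update_pending_flush_dsc_v1(ctx, DSC_1);
 *	// pending_dsc_flush_mask |= BIT(1)
 *	// pending_flush_mask     |= BIT(DSC_IDX), i.e. bit 22
 *
 * The pre-v1 helpers, by contrast, encode everything in the single
 * CTL_FLUSH mask.
 */
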
static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
	enum dpu_dspp dspp, u32 dspp_sub_blk)
{
	switch (dspp) {
	case DSPP_0:
		ctx->pending_flush_mask |= BIT(13);
		break;
	case DSPP_1:
		ctx->pending_flush_mask |= BIT(14);
		break;
	case DSPP_2:
		ctx->pending_flush_mask |= BIT(15);
		break;
	case DSPP_3:
		ctx->pending_flush_mask |= BIT(21);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_dspp_sub_blocks(
	struct dpu_hw_ctl *ctx, enum dpu_dspp dspp, u32 dspp_sub_blk)
{
	if (dspp >= DSPP_MAX)
		return;

	switch (dspp_sub_blk) {
	case DPU_DSPP_PCC:
		ctx->pending_dspp_flush_mask[dspp - DSPP_0] |= BIT(4);
		break;
	default:
		return;
	}

	ctx->pending_flush_mask |= BIT(DSPP_IDX);
}

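/*
 * Example for the sub-block variant: flushing the PCC block of DSPP_1
 * touches both the per-DSPP register mask and the group bit:
 *
 *	dpu_hw_ctl_update_pending_flush_dspp_sub_blocks(ctx, DSPP_1,
 *							DPU_DSPP_PCC);
 *	// pending_dspp_flush_mask[1] |= BIT(4)
 *	// pending_flush_mask         |= BIT(DSPP_IDX), i.e. bit 29
 *
 * which makes trigger_flush_v1() write CTL_DSPP_n_FLUSH(1) at 0x140.
 */
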
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * It takes around 30us for the MDP to finish resetting its CTL
	 * path; polling every 20-50us means the reset should normally be
	 * observed complete on the first poll.
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

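/*
 * Putting the two reset entry points together (a descriptive sketch, not an
 * additional API): reset_control() initiates the self-clearing soft reset
 * and waits for it, while wait_reset_status() only waits, for the case
 * where the bit is already set:
 *
 *	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);	// hw clears bit 0 when done
 *	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
 *		return -EINVAL;			// still set after 2ms: failed
 */
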
static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;

		DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
	}

	DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
}

struct ctl_blend_config {
	int idx, shift, ext_shift;
};

static const struct ctl_blend_config ctl_blend_config[][2] = {
	[SSPP_NONE] = { { -1 }, { -1 } },
	[SSPP_MAX] =  { { -1 }, { -1 } },
	[SSPP_VIG0] = { { 0, 0,  0  }, { 3, 0 } },
	[SSPP_VIG1] = { { 0, 3,  2  }, { 3, 4 } },
	[SSPP_VIG2] = { { 0, 6,  4  }, { 3, 8 } },
	[SSPP_VIG3] = { { 0, 26, 6  }, { 3, 12 } },
	[SSPP_RGB0] = { { 0, 9,  8  }, { -1 } },
	[SSPP_RGB1] = { { 0, 12, 10 }, { -1 } },
	[SSPP_RGB2] = { { 0, 15, 12 }, { -1 } },
	[SSPP_RGB3] = { { 0, 29, 14 }, { -1 } },
	[SSPP_DMA0] = { { 0, 18, 16 }, { 2, 8 } },
	[SSPP_DMA1] = { { 0, 21, 18 }, { 2, 12 } },
	[SSPP_DMA2] = { { 2, 0      }, { 2, 16 } },
	[SSPP_DMA3] = { { 2, 4      }, { 2, 20 } },
	[SSPP_DMA4] = { { 4, 0      }, { 4, 8 } },
	[SSPP_DMA5] = { { 4, 4      }, { 4, 12 } },
	[SSPP_CURSOR0] =  { { 1, 20 }, { -1 } },
	[SSPP_CURSOR1] =  { { 1, 26 }, { -1 } },
};

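/*
 * How to read the table: index [pipe][0] describes the pipe's RECT_0 field
 * and [pipe][1] its RECT_1 (multirect) field; idx selects the mixercfg word
 * (0 = CTL_LAYER, 1..4 = CTL_LAYER_EXTn), shift is the field's bit offset,
 * and ext_shift locates the overflow bit used by the 3-bit CTL_LAYER
 * fields. Decoding SSPP_VIG1 as an example:
 *
 *	[SSPP_VIG1] = { { 0, 3, 2 }, { 3, 4 } },
 *	// RECT_0: 3-bit field at CTL_LAYER bits [5:3], overflow at
 *	//         CTL_LAYER_EXT bit 2
 *	// RECT_1: 4-bit field at CTL_LAYER_EXT3 bits [7:4]
 */
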
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mix, ext, mix_ext;
	u32 mixercfg[5] = { 0 };
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg[0] = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;
		mix_ext = (i + 1) & 0xf;

		for (j = 0; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];
			enum dpu_sspp pipe = stage_cfg->stage[i][j];
			const struct ctl_blend_config *cfg =
				&ctl_blend_config[pipe][rect_index == DPU_SSPP_RECT_1];

			/*
			 * CTL_LAYER has 3-bit fields (plus an overflow bit
			 * in the EXT register); all EXT registers have
			 * 4-bit fields.
			 */
			if (cfg->idx == -1) {
				continue;
			} else if (cfg->idx == 0) {
				mixercfg[0] |= mix << cfg->shift;
				mixercfg[1] |= ext << cfg->ext_shift;
			} else {
				mixercfg[cfg->idx] |= mix_ext << cfg->shift;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg[0]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg[1]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg[2]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg[3]);
	if (ctx->mdss_ver->core_major_ver >= 9)
		DPU_REG_WRITE(c, CTL_LAYER_EXT4(lm), mixercfg[4]);
}

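/*
 * Worked example for the encoding above (illustrative staging): placing
 * SSPP_VIG0 at stage 0 and SSPP_DMA2 at stage 1, both RECT_0, yields:
 *
 *	i = 0: mix = 1, VIG0 is { 0, 0, 0 } -> mixercfg[0] |= 1 << 0
 *	i = 1: mix_ext = 2, DMA2 is { 2, 0 } -> mixercfg[2] |= 2 << 0
 *
 *	CTL_LAYER(lm)      = CTL_MIXER_BORDER_OUT | 0x1
 *	CTL_LAYER_EXT2(lm) = 0x2
 */
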
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 dsc_active = 0;
	u32 wb_active = 0;
	u32 cwb_active = 0;
	u32 mode_sel = 0;
	u32 merge_3d_active = 0;

	/*
	 * CTL_TOP[31:28] carries the group_id used to collate CTL paths
	 * per VM. Explicitly disable it until VM support is added in SW,
	 * since the power-on reset value is not the disabled state.
	 */
	if (ctx->mdss_ver->core_major_ver >= 7)
		mode_sel = CTL_DEFAULT_GROUP_ID << 28;

	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);

	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
	wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
	cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE);
	dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
	merge_3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);

	if (cfg->intf)
		intf_active |= BIT(cfg->intf - INTF_0);

	if (cfg->wb)
		wb_active |= BIT(cfg->wb - WB_0);

	if (cfg->cwb)
		cwb_active |= cfg->cwb;

	if (cfg->dsc)
		dsc_active |= cfg->dsc;

	if (cfg->merge_3d)
		merge_3d_active |= BIT(cfg->merge_3d - MERGE_3D_0);

	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
	DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
	DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
	DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, merge_3d_active);

	if (cfg->intf_master)
		DPU_REG_WRITE(c, CTL_INTF_MASTER, BIT(cfg->intf_master - INTF_0));

	if (cfg->cdm)
		DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm);
}

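/*
 * Note that dpu_hw_ctl_intf_cfg_v1() read-modify-writes the ACTIVE
 * registers, so successive calls (e.g. for a dual-interface mode)
 * accumulate rather than overwrite each other. A second call with
 * cfg->intf = INTF_2 on top of an earlier INTF_1 setup leaves:
 *
 *	CTL_INTF_ACTIVE == BIT(1) | BIT(2)
 *
 * Teardown therefore has to clear bits explicitly, which is what
 * dpu_hw_ctl_reset_intf_cfg_v1() below does.
 */
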
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	if (cfg->wb)
		intf_cfg |= (cfg->wb & 0x3) + 2;

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}

static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 intf_master = 0;
	u32 wb_active = 0;
	u32 cwb_active = 0;
	u32 merge3d_active = 0;
	u32 dsc_active;
	u32 cdm_active;

	/*
	 * This API resets each portion of the CTL path: it clears the
	 * SSPPs staged on the LM, the merge_3d block, interfaces,
	 * writeback, etc., to ensure a clean teardown of the pipeline.
	 * It is used for writeback to begin with, to tear down the
	 * writeback session properly, but upon further validation it
	 * can be extended to all interfaces.
	 */
	if (cfg->merge_3d) {
		merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
		merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
				merge3d_active);
	}

	if (ctx->ops.clear_all_blendstages)
		ctx->ops.clear_all_blendstages(ctx);

	if (ctx->ops.set_active_lms)
		ctx->ops.set_active_lms(ctx, NULL);

	if (ctx->ops.set_active_fetch_pipes)
		ctx->ops.set_active_fetch_pipes(ctx, NULL);

	if (ctx->ops.set_active_pipes)
		ctx->ops.set_active_pipes(ctx, NULL);

	if (cfg->intf) {
		intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
		intf_active &= ~BIT(cfg->intf - INTF_0);
		DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);

		intf_master = DPU_REG_READ(c, CTL_INTF_MASTER);

		/* Unset this intf as master, if it is the current master */
		if (intf_master == BIT(cfg->intf - INTF_0)) {
			DPU_DEBUG_DRIVER("Unsetting INTF_%d master\n", cfg->intf - INTF_0);
			DPU_REG_WRITE(c, CTL_INTF_MASTER, 0);
		}
	}

	if (cfg->cwb) {
		cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE);
		cwb_active &= ~cfg->cwb;
		DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
	}

	if (cfg->wb) {
		wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
		wb_active &= ~BIT(cfg->wb - WB_0);
		DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
	}

	if (cfg->dsc) {
		dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
		dsc_active &= ~cfg->dsc;
		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
	}

	if (cfg->cdm) {
		cdm_active = DPU_REG_READ(c, CTL_CDM_ACTIVE);
		cdm_active &= ~cfg->cdm;
		DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cdm_active);
	}
}

static void dpu_hw_ctl_set_active_fetch_pipes(struct dpu_hw_ctl *ctx,
					      unsigned long *fetch_active)
{
	int i;
	u32 val = 0;

	if (fetch_active) {
		for (i = 0; i < SSPP_MAX; i++) {
			if (test_bit(i, fetch_active) &&
				fetch_tbl[i] != CTL_INVALID_BIT)
				val |= BIT(fetch_tbl[i]);
		}
	}

	DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}

static void dpu_hw_ctl_set_active_pipes(struct dpu_hw_ctl *ctx,
					unsigned long *active_pipes)
{
	int i;
	u32 val = 0;

	if (active_pipes) {
		for (i = 0; i < SSPP_MAX; i++) {
			if (test_bit(i, active_pipes) &&
			    fetch_tbl[i] != CTL_INVALID_BIT)
				val |= BIT(fetch_tbl[i]);
		}
	}

	DPU_REG_WRITE(&ctx->hw, CTL_PIPE_ACTIVE, val);
}

static void dpu_hw_ctl_set_active_lms(struct dpu_hw_ctl *ctx,
				      unsigned long *active_lms)
{
	int i;
	u32 val = 0;

	if (active_lms) {
		for (i = LM_0; i < LM_MAX; i++) {
			if (test_bit(i, active_lms) &&
			    lm_tbl[i] != CTL_INVALID_BIT)
				val |= BIT(lm_tbl[i]);
		}
	}

	DPU_REG_WRITE(&ctx->hw, CTL_LAYER_ACTIVE, val);
}

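/*
 * On DPU >= 12.0 the blend-stage registers are gone and the active bitmaps
 * above are the staging mechanism. A hedged caller-side sketch (variable
 * name illustrative):
 *
 *	DECLARE_BITMAP(lms, LM_MAX) = { 0 };
 *
 *	set_bit(LM_0, lms);
 *	set_bit(LM_1, lms);
 *	ctl->ops.set_active_lms(ctl, lms);	// CTL_LAYER_ACTIVE = 0x3
 *
 * Passing NULL, as reset_intf_cfg_v1() does, clears the register.
 */
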
/**
 * dpu_hw_ctl_init() - Initializes the ctl_path hw driver object.
 * Should be called before accessing any ctl_path register.
 * @dev:  Corresponding device for devres management
 * @cfg:  ctl_path catalog entry for which driver object is required
 * @addr: mapped register io address of MDP
 * @mdss_ver: dpu core's major and minor versions
 * @mixer_count: Number of mixers in @mixer
 * @mixer: Pointer to an array of Layer Mixers defined in the catalog
 */
struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
				   const struct dpu_ctl_cfg *cfg,
				   void __iomem *addr,
				   const struct dpu_mdss_version *mdss_ver,
				   u32 mixer_count,
				   const struct dpu_lm_cfg *mixer)
{
	struct dpu_hw_ctl *c;

	c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	c->hw.blk_addr = addr + cfg->base;
	c->hw.log_mask = DPU_DBG_MASK_CTL;

	c->caps = cfg;
	c->mdss_ver = mdss_ver;

	if (mdss_ver->core_major_ver >= 5) {
		c->ops.trigger_flush = dpu_hw_ctl_trigger_flush_v1;
		c->ops.setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
		c->ops.reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
		c->ops.update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf_v1;

		c->ops.update_pending_flush_periph =
			dpu_hw_ctl_update_pending_flush_periph_v1;

		c->ops.update_pending_flush_merge_3d =
			dpu_hw_ctl_update_pending_flush_merge_3d_v1;
		c->ops.update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
		c->ops.update_pending_flush_cwb = dpu_hw_ctl_update_pending_flush_cwb_v1;
		c->ops.update_pending_flush_dsc =
			dpu_hw_ctl_update_pending_flush_dsc_v1;
		c->ops.update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
	} else {
		c->ops.trigger_flush = dpu_hw_ctl_trigger_flush;
		c->ops.setup_intf_cfg = dpu_hw_ctl_intf_cfg;
		c->ops.update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf;
		c->ops.update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
		c->ops.update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm;
	}
	c->ops.clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
	c->ops.update_pending_flush = dpu_hw_ctl_update_pending_flush;
	c->ops.get_pending_flush = dpu_hw_ctl_get_pending_flush;
	c->ops.get_flush_register = dpu_hw_ctl_get_flush_register;
	c->ops.trigger_start = dpu_hw_ctl_trigger_start;
	c->ops.is_started = dpu_hw_ctl_is_started;
	c->ops.trigger_pending = dpu_hw_ctl_trigger_pending;
	c->ops.reset = dpu_hw_ctl_reset_control;
	c->ops.wait_reset_status = dpu_hw_ctl_wait_reset_status;
	if (mdss_ver->core_major_ver < 12) {
		c->ops.clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
		c->ops.setup_blendstage = dpu_hw_ctl_setup_blendstage;
	} else {
		c->ops.set_active_pipes = dpu_hw_ctl_set_active_pipes;
		c->ops.set_active_lms = dpu_hw_ctl_set_active_lms;
	}
	c->ops.update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
	c->ops.update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
	if (mdss_ver->core_major_ver >= 7)
		c->ops.update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp_sub_blocks;
	else
		c->ops.update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;

	if (mdss_ver->core_major_ver >= 7)
		c->ops.set_active_fetch_pipes = dpu_hw_ctl_set_active_fetch_pipes;

	c->idx = cfg->id;
	c->mixer_count = mixer_count;
	c->mixer_hw_caps = mixer;

	return c;
}