// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>

#include <drm/drm_managed.h>

#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"

#define   CTL_LAYER(lm)                 \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT(lm)             \
	(0x40 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT2(lm)            \
	(0x70 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT3(lm)            \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT4(lm)            \
	(0xB8 + (((lm) - LM_0) * 0x004))
#define   CTL_TOP                       0x014
#define   CTL_FLUSH                     0x018
#define   CTL_START                     0x01C
#define   CTL_PREPARE                   0x0d0
#define   CTL_SW_RESET                  0x030
#define   CTL_LAYER_EXTN_OFFSET         0x40
#define   CTL_MERGE_3D_ACTIVE           0x0E4
#define   CTL_DSC_ACTIVE                0x0E8
#define   CTL_WB_ACTIVE                 0x0EC
#define   CTL_INTF_ACTIVE               0x0F4
#define   CTL_FETCH_PIPE_ACTIVE         0x0FC
#define   CTL_MERGE_3D_FLUSH            0x100
#define   CTL_DSC_FLUSH                 0x104
#define   CTL_WB_FLUSH                  0x108
#define   CTL_INTF_FLUSH                0x110
#define   CTL_INTF_MASTER               0x134
#define   CTL_DSPP_n_FLUSH(n)           ((0x13C) + ((n) * 4))

#define CTL_MIXER_BORDER_OUT            BIT(24)
#define CTL_FLUSH_MASK_CTL              BIT(17)

#define DPU_REG_RESET_TIMEOUT_US        2000
#define MERGE_3D_IDX                    23
#define DSC_IDX                         22
#define INTF_IDX                        31
#define WB_IDX                          16
#define DSPP_IDX                        29  /* From DPU hw rev 7.x.x */
#define CTL_INVALID_BIT                 0xffff
#define CTL_DEFAULT_GROUP_ID            0xf

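/*
 * Per-SSPP bit positions in CTL_FETCH_PIPE_ACTIVE, indexed by enum
 * dpu_sspp: VIG0..VIG3 map to bits 16..19 and DMA0..DMA5 to bits 0..5,
 * while pipes without a fetch-active bit carry CTL_INVALID_BIT.
 */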
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
	1, 2, 3, 4, 5};

static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
		enum dpu_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}

static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
{
	return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
}

static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
	ctx->pending_intf_flush_mask = 0;
	ctx->pending_wb_flush_mask = 0;
	ctx->pending_merge_3d_flush_mask = 0;
	ctx->pending_dsc_flush_mask = 0;

	memset(ctx->pending_dspp_flush_mask, 0,
		sizeof(ctx->pending_dspp_flush_mask));
}

static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}

static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	return ctx->pending_flush_mask;
}

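/*
 * Active-CTL ("v1") flush: the MERGE_3D/INTF/WB/DSPP/DSC bits in the
 * top-level CTL_FLUSH act as group enables, so the per-block pending
 * masks are written to their dedicated *_FLUSH registers before the
 * main flush register is programmed.
 */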
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
	int dspp;

	if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
				ctx->pending_merge_3d_flush_mask);
	if (ctx->pending_flush_mask & BIT(INTF_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
				ctx->pending_intf_flush_mask);
	if (ctx->pending_flush_mask & BIT(WB_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
				ctx->pending_wb_flush_mask);

	if (ctx->pending_flush_mask & BIT(DSPP_IDX))
		for (dspp = DSPP_0; dspp < DSPP_MAX; dspp++) {
			if (ctx->pending_dspp_flush_mask[dspp - DSPP_0])
				DPU_REG_WRITE(&ctx->hw,
					      CTL_DSPP_n_FLUSH(dspp - DSPP_0),
					      ctx->pending_dspp_flush_mask[dspp - DSPP_0]);
		}

	if (ctx->pending_flush_mask & BIT(DSC_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,
			      ctx->pending_dsc_flush_mask);

	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
	enum dpu_sspp sspp)
{
	switch (sspp) {
	case SSPP_VIG0:
		ctx->pending_flush_mask |= BIT(0);
		break;
	case SSPP_VIG1:
		ctx->pending_flush_mask |= BIT(1);
		break;
	case SSPP_VIG2:
		ctx->pending_flush_mask |= BIT(2);
		break;
	case SSPP_VIG3:
		ctx->pending_flush_mask |= BIT(18);
		break;
	case SSPP_RGB0:
		ctx->pending_flush_mask |= BIT(3);
		break;
	case SSPP_RGB1:
		ctx->pending_flush_mask |= BIT(4);
		break;
	case SSPP_RGB2:
		ctx->pending_flush_mask |= BIT(5);
		break;
	case SSPP_RGB3:
		ctx->pending_flush_mask |= BIT(19);
		break;
	case SSPP_DMA0:
		ctx->pending_flush_mask |= BIT(11);
		break;
	case SSPP_DMA1:
		ctx->pending_flush_mask |= BIT(12);
		break;
	case SSPP_DMA2:
		ctx->pending_flush_mask |= BIT(24);
		break;
	case SSPP_DMA3:
		ctx->pending_flush_mask |= BIT(25);
		break;
	case SSPP_DMA4:
		ctx->pending_flush_mask |= BIT(13);
		break;
	case SSPP_DMA5:
		ctx->pending_flush_mask |= BIT(14);
		break;
	case SSPP_CURSOR0:
		ctx->pending_flush_mask |= BIT(22);
		break;
	case SSPP_CURSOR1:
		ctx->pending_flush_mask |= BIT(23);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm)
{
	switch (lm) {
	case LM_0:
		ctx->pending_flush_mask |= BIT(6);
		break;
	case LM_1:
		ctx->pending_flush_mask |= BIT(7);
		break;
	case LM_2:
		ctx->pending_flush_mask |= BIT(8);
		break;
	case LM_3:
		ctx->pending_flush_mask |= BIT(9);
		break;
	case LM_4:
		ctx->pending_flush_mask |= BIT(10);
		break;
	case LM_5:
		ctx->pending_flush_mask |= BIT(20);
		break;
	default:
		break;
	}

	ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
}

static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		ctx->pending_flush_mask |= BIT(31);
		break;
	case INTF_1:
		ctx->pending_flush_mask |= BIT(30);
		break;
	case INTF_2:
		ctx->pending_flush_mask |= BIT(29);
		break;
	case INTF_3:
		ctx->pending_flush_mask |= BIT(28);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	switch (wb) {
	case WB_0:
	case WB_1:
	case WB_2:
		ctx->pending_flush_mask |= BIT(WB_IDX);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
	ctx->pending_flush_mask |= BIT(WB_IDX);
}

static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(INTF_IDX);
}

static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
		enum dpu_merge_3d merge_3d)
{
	ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}

static void dpu_hw_ctl_update_pending_flush_dsc_v1(struct dpu_hw_ctl *ctx,
						   enum dpu_dsc dsc_num)
{
	ctx->pending_dsc_flush_mask |= BIT(dsc_num - DSC_0);
	ctx->pending_flush_mask |= BIT(DSC_IDX);
}

static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
	enum dpu_dspp dspp, u32 dspp_sub_blk)
{
	switch (dspp) {
	case DSPP_0:
		ctx->pending_flush_mask |= BIT(13);
		break;
	case DSPP_1:
		ctx->pending_flush_mask |= BIT(14);
		break;
	case DSPP_2:
		ctx->pending_flush_mask |= BIT(15);
		break;
	case DSPP_3:
		ctx->pending_flush_mask |= BIT(21);
		break;
	default:
		break;
	}
}

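/*
 * Flush a single DSPP sub-block through the per-DSPP CTL_DSPP_n_FLUSH
 * registers (DPU hw rev 7.x.x onwards, per the DSPP_IDX note above);
 * only the PCC sub-block, bit 4, is handled here.
 */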
static void dpu_hw_ctl_update_pending_flush_dspp_sub_blocks(
	struct dpu_hw_ctl *ctx, enum dpu_dspp dspp, u32 dspp_sub_blk)
{
	if (dspp >= DSPP_MAX)
		return;

	switch (dspp_sub_blk) {
	case DPU_DSPP_PCC:
		ctx->pending_dspp_flush_mask[dspp - DSPP_0] |= BIT(4);
		break;
	default:
		return;
	}

	ctx->pending_flush_mask |= BIT(DSPP_IDX);
}

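/*
 * Returns 0 once the hardware has cleared CTL_SW_RESET bit 0, or the
 * still-asserted status if the reset has not completed within
 * timeout_us.
 */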
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * It takes around 30us for the MDP to finish resetting its CTL
	 * path, so with a 20-50us poll interval the reset should have
	 * completed by the first poll.
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;

		DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
	}

	DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
}

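/*
 * Where each SSPP's blend-stage value lives in the CTL_LAYER*
 * registers: entry [0] is for RECT_0 (or non-multirect) and entry [1]
 * for RECT_1. 'idx' picks the mixercfg word (-1 if the pipe has no
 * field there), 'shift' the bit position within it, and 'ext_shift'
 * the companion bit in CTL_LAYER_EXT for the 3-bit CTL_LAYER fields.
 */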
struct ctl_blend_config {
	int idx, shift, ext_shift;
};

static const struct ctl_blend_config ctl_blend_config[][2] = {
	[SSPP_NONE] = { { -1 }, { -1 } },
	[SSPP_MAX] =  { { -1 }, { -1 } },
	[SSPP_VIG0] = { { 0, 0,  0  }, { 3, 0 } },
	[SSPP_VIG1] = { { 0, 3,  2  }, { 3, 4 } },
	[SSPP_VIG2] = { { 0, 6,  4  }, { 3, 8 } },
	[SSPP_VIG3] = { { 0, 26, 6  }, { 3, 12 } },
	[SSPP_RGB0] = { { 0, 9,  8  }, { -1 } },
	[SSPP_RGB1] = { { 0, 12, 10 }, { -1 } },
	[SSPP_RGB2] = { { 0, 15, 12 }, { -1 } },
	[SSPP_RGB3] = { { 0, 29, 14 }, { -1 } },
	[SSPP_DMA0] = { { 0, 18, 16 }, { 2, 8 } },
	[SSPP_DMA1] = { { 0, 21, 18 }, { 2, 12 } },
	[SSPP_DMA2] = { { 2, 0      }, { 2, 16 } },
	[SSPP_DMA3] = { { 2, 4      }, { 2, 20 } },
	[SSPP_DMA4] = { { 4, 0      }, { 4, 8 } },
	[SSPP_DMA5] = { { 4, 4      }, { 4, 12 } },
	[SSPP_CURSOR0] =  { { 1, 20 }, { -1 } },
	[SSPP_CURSOR1] =  { { 1, 26 }, { -1 } },
};

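/*
 * The register value for stage i is the 1-based stage number: staging
 * a pipe at stage 5 writes 6 into its 3-bit CTL_LAYER field, stage 7
 * wraps to mix = 0 with the EXT bit set, and the 4-bit EXT2/EXT3/EXT4
 * fields take mix_ext = 8 directly.
 */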
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mix, ext, mix_ext;
	u32 mixercfg[5] = { 0 };
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg[0] = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;
		mix_ext = (i + 1) & 0xf;

		for (j = 0; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];
			enum dpu_sspp pipe = stage_cfg->stage[i][j];
			const struct ctl_blend_config *cfg =
				&ctl_blend_config[pipe][rect_index == DPU_SSPP_RECT_1];

			/*
			 * CTL_LAYER has 3-bit fields (plus extra bits in the
			 * EXT register), while all EXT* registers have 4-bit
			 * fields.
			 */
			if (cfg->idx == -1) {
				continue;
			} else if (cfg->idx == 0) {
				mixercfg[0] |= mix << cfg->shift;
				mixercfg[1] |= ext << cfg->ext_shift;
			} else {
				mixercfg[cfg->idx] |= mix_ext << cfg->shift;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg[0]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg[1]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg[2]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg[3]);
	if (test_bit(DPU_CTL_HAS_LAYER_EXT4, &ctx->caps->features))
		DPU_REG_WRITE(c, CTL_LAYER_EXT4(lm), mixercfg[4]);
}

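/*
 * Active-CTL interface configuration: rather than packing everything
 * into CTL_TOP, the target interface and writeback are enabled as
 * individual bits in CTL_INTF_ACTIVE and CTL_WB_ACTIVE (read-modify-
 * write, preserving other active users), with MERGE_3D and DSC
 * enabled through their own ACTIVE registers.
 */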
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 wb_active = 0;
	u32 mode_sel = 0;

	/* CTL_TOP[31:28] carries a group_id used to collate CTL paths
	 * per VM. Explicitly disable it until VM support is added in
	 * SW, as the power-on reset value does not disable it.
	 */
	if (test_bit(DPU_CTL_VM_CFG, &ctx->caps->features))
		mode_sel = CTL_DEFAULT_GROUP_ID << 28;

	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);

	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
	wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);

	if (cfg->intf)
		intf_active |= BIT(cfg->intf - INTF_0);

	if (cfg->wb)
		wb_active |= BIT(cfg->wb - WB_0);

	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);

	if (cfg->merge_3d)
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
			      BIT(cfg->merge_3d - MERGE_3D_0));

	if (cfg->dsc)
		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
}

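/*
 * Legacy interface configuration packs everything into CTL_TOP: the
 * interface id in bits [7:4], the 3D mux enable and mode in bits
 * [21:19], the writeback select in the low bits, command vs. video
 * mode in bit 17 and the command-mode stream select in bits [16:15].
 */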
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	if (cfg->wb)
		intf_cfg |= (cfg->wb & 0x3) + 2;

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}

static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 wb_active = 0;
	u32 merge3d_active = 0;
	u32 dsc_active;

	/*
	 * This API resets each portion of the CTL path, namely clearing
	 * the sspps staged on the lm, the merge_3d block, interfaces,
	 * writeback etc., to ensure a clean teardown of the pipeline.
	 * It is used for writeback to begin with, to get a proper
	 * teardown of the writeback session, but upon further validation
	 * it can be extended to all interfaces.
	 */
	if (cfg->merge_3d) {
		merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
		merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
				merge3d_active);
	}

	dpu_hw_ctl_clear_all_blendstages(ctx);

	if (cfg->intf) {
		intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
		intf_active &= ~BIT(cfg->intf - INTF_0);
		DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	}

	if (cfg->wb) {
		wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
		wb_active &= ~BIT(cfg->wb - WB_0);
		DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
	}

	if (cfg->dsc) {
		dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
		dsc_active &= ~cfg->dsc;
		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
	}
}

static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
	unsigned long *fetch_active)
{
	int i;
	u32 val = 0;

	if (fetch_active) {
		for (i = 0; i < SSPP_MAX; i++) {
			if (test_bit(i, fetch_active) &&
				fetch_tbl[i] != CTL_INVALID_BIT)
				val |= BIT(fetch_tbl[i]);
		}
	}

	DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}

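/*
 * Wire up the ops according to the catalog features: CTLs with
 * DPU_CTL_ACTIVE_CFG get the "v1" active-CTL flush and interface
 * helpers, everything else keeps the legacy CTL_TOP programming.
 */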
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
		unsigned long cap)
{
	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
		ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf_v1;
		ops->update_pending_flush_merge_3d =
			dpu_hw_ctl_update_pending_flush_merge_3d_v1;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
		ops->update_pending_flush_dsc =
			dpu_hw_ctl_update_pending_flush_dsc_v1;
	} else {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
	}
	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
	ops->trigger_start = dpu_hw_ctl_trigger_start;
	ops->is_started = dpu_hw_ctl_is_started;
	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
	ops->reset = dpu_hw_ctl_reset_control;
	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
	ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
	ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
	if (cap & BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp_sub_blocks;
	else
		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;

	if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
		ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
}

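/**
 * dpu_hw_ctl_init() - Initialize the CTL path hw driver object.
 * Should be called before accessing any ctl_path register.
 * @dev: Corresponding device for devres management
 * @cfg: ctl_path catalog entry for which driver object is required
 * @addr: Mapped register io address of MDP
 * @mixer_count: Number of mixers in @mixer
 * @mixer: Pointer to an array of Layer Mixers defined in the catalog
 *
 * Return: drmm-managed dpu_hw_ctl object on success, ERR_PTR on failure.
 */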
struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
				   const struct dpu_ctl_cfg *cfg,
				   void __iomem *addr,
				   u32 mixer_count,
				   const struct dpu_lm_cfg *mixer)
{
	struct dpu_hw_ctl *c;

	c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	c->hw.blk_addr = addr + cfg->base;
	c->hw.log_mask = DPU_DBG_MASK_CTL;

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = cfg->id;
	c->mixer_count = mixer_count;
	c->mixer_hw_caps = mixer;

	return c;
}
709