// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>

#include <drm/drm_managed.h>

#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"

#define   CTL_LAYER(lm)                 \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT(lm)             \
	(0x40 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT2(lm)            \
	(0x70 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT3(lm)            \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT4(lm)            \
	(0xB8 + (((lm) - LM_0) * 0x004))
#define   CTL_TOP                       0x014
#define   CTL_FLUSH                     0x018
#define   CTL_START                     0x01C
#define   CTL_PREPARE                   0x0d0
#define   CTL_SW_RESET                  0x030
#define   CTL_LAYER_EXTN_OFFSET         0x40
#define   CTL_MERGE_3D_ACTIVE           0x0E4
#define   CTL_DSC_ACTIVE                0x0E8
#define   CTL_WB_ACTIVE                 0x0EC
#define   CTL_INTF_ACTIVE               0x0F4
#define   CTL_CDM_ACTIVE                0x0F8
#define   CTL_FETCH_PIPE_ACTIVE         0x0FC
#define   CTL_MERGE_3D_FLUSH            0x100
#define   CTL_DSC_FLUSH                 0x104
#define   CTL_WB_FLUSH                  0x108
#define   CTL_INTF_FLUSH                0x110
#define   CTL_CDM_FLUSH                 0x114
#define   CTL_PERIPH_FLUSH              0x128
#define   CTL_INTF_MASTER               0x134
#define   CTL_DSPP_n_FLUSH(n)           ((0x13C) + ((n) * 4))

#define CTL_MIXER_BORDER_OUT            BIT(24)
#define CTL_FLUSH_MASK_CTL              BIT(17)

#define DPU_REG_RESET_TIMEOUT_US        2000
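/*
 * Bit positions in CTL_FLUSH which, on hardware with active-CTL support,
 * tell dpu_hw_ctl_trigger_flush_v1() that the matching detail flush register
 * (CTL_MERGE_3D_FLUSH, CTL_DSC_FLUSH, CTL_CDM_FLUSH, CTL_PERIPH_FLUSH,
 * CTL_INTF_FLUSH, CTL_WB_FLUSH, CTL_DSPP_n_FLUSH) needs to be programmed.
 */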
#define MERGE_3D_IDX    23
#define DSC_IDX         22
#define CDM_IDX         26
#define PERIPH_IDX      30
#define INTF_IDX        31
#define WB_IDX          16
#define DSPP_IDX        29  /* From DPU hw rev 7.x.x */
#define CTL_INVALID_BIT                 0xffff
#define CTL_DEFAULT_GROUP_ID		0xf

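/*
 * Per-SSPP bit positions in CTL_FETCH_PIPE_ACTIVE, indexed by enum dpu_sspp.
 * Pipes marked with CTL_INVALID_BIT have no fetch bit and are skipped.
 */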
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
	1, 2, 3, 4, 5};

static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
		enum dpu_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}

static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
{
	return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
}

static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
	ctx->pending_intf_flush_mask = 0;
	ctx->pending_wb_flush_mask = 0;
	ctx->pending_merge_3d_flush_mask = 0;
	ctx->pending_dsc_flush_mask = 0;
	ctx->pending_cdm_flush_mask = 0;

	memset(ctx->pending_dspp_flush_mask, 0,
		sizeof(ctx->pending_dspp_flush_mask));
}

static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}

static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	return ctx->pending_flush_mask;
}

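/*
 * Active-CTL ("v1") flush: program each per-block detail flush register whose
 * top-level bit is set in pending_flush_mask, then write the accumulated mask
 * to CTL_FLUSH to kick the flush.
 */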
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
	int dspp;

	if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
				ctx->pending_merge_3d_flush_mask);
	if (ctx->pending_flush_mask & BIT(INTF_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
				ctx->pending_intf_flush_mask);
	if (ctx->pending_flush_mask & BIT(WB_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
				ctx->pending_wb_flush_mask);

	if (ctx->pending_flush_mask & BIT(DSPP_IDX))
		for (dspp = DSPP_0; dspp < DSPP_MAX; dspp++) {
			if (ctx->pending_dspp_flush_mask[dspp - DSPP_0])
				DPU_REG_WRITE(&ctx->hw,
				CTL_DSPP_n_FLUSH(dspp - DSPP_0),
				ctx->pending_dspp_flush_mask[dspp - DSPP_0]);
		}

	if (ctx->pending_flush_mask & BIT(PERIPH_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_PERIPH_FLUSH,
			      ctx->pending_periph_flush_mask);

	if (ctx->pending_flush_mask & BIT(DSC_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,
			      ctx->pending_dsc_flush_mask);

	if (ctx->pending_flush_mask & BIT(CDM_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_CDM_FLUSH,
			      ctx->pending_cdm_flush_mask);

	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
	enum dpu_sspp sspp)
{
	switch (sspp) {
	case SSPP_VIG0:
		ctx->pending_flush_mask |= BIT(0);
		break;
	case SSPP_VIG1:
		ctx->pending_flush_mask |= BIT(1);
		break;
	case SSPP_VIG2:
		ctx->pending_flush_mask |= BIT(2);
		break;
	case SSPP_VIG3:
		ctx->pending_flush_mask |= BIT(18);
		break;
	case SSPP_RGB0:
		ctx->pending_flush_mask |= BIT(3);
		break;
	case SSPP_RGB1:
		ctx->pending_flush_mask |= BIT(4);
		break;
	case SSPP_RGB2:
		ctx->pending_flush_mask |= BIT(5);
		break;
	case SSPP_RGB3:
		ctx->pending_flush_mask |= BIT(19);
		break;
	case SSPP_DMA0:
		ctx->pending_flush_mask |= BIT(11);
		break;
	case SSPP_DMA1:
		ctx->pending_flush_mask |= BIT(12);
		break;
	case SSPP_DMA2:
		ctx->pending_flush_mask |= BIT(24);
		break;
	case SSPP_DMA3:
		ctx->pending_flush_mask |= BIT(25);
		break;
	case SSPP_DMA4:
		ctx->pending_flush_mask |= BIT(13);
		break;
	case SSPP_DMA5:
		ctx->pending_flush_mask |= BIT(14);
		break;
	case SSPP_CURSOR0:
		ctx->pending_flush_mask |= BIT(22);
		break;
	case SSPP_CURSOR1:
		ctx->pending_flush_mask |= BIT(23);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm)
{
	switch (lm) {
	case LM_0:
		ctx->pending_flush_mask |= BIT(6);
		break;
	case LM_1:
		ctx->pending_flush_mask |= BIT(7);
		break;
	case LM_2:
		ctx->pending_flush_mask |= BIT(8);
		break;
	case LM_3:
		ctx->pending_flush_mask |= BIT(9);
		break;
	case LM_4:
		ctx->pending_flush_mask |= BIT(10);
		break;
	case LM_5:
		ctx->pending_flush_mask |= BIT(20);
		break;
	default:
		break;
	}

	ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
}

static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		ctx->pending_flush_mask |= BIT(31);
		break;
	case INTF_1:
		ctx->pending_flush_mask |= BIT(30);
		break;
	case INTF_2:
		ctx->pending_flush_mask |= BIT(29);
		break;
	case INTF_3:
		ctx->pending_flush_mask |= BIT(28);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	switch (wb) {
	case WB_0:
	case WB_1:
	case WB_2:
		ctx->pending_flush_mask |= BIT(WB_IDX);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_cdm(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num)
{
	/* update pending flush only if CDM_0 is flushed */
	if (cdm_num == CDM_0)
		ctx->pending_flush_mask |= BIT(CDM_IDX);
}

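/*
 * The *_v1 helpers below record both the block-local bit (later written to
 * the matching detail flush register) and the top-level CTL_FLUSH bit that
 * dpu_hw_ctl_trigger_flush_v1() checks to decide which detail registers to
 * program.
 */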
static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
	ctx->pending_flush_mask |= BIT(WB_IDX);
}

static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(INTF_IDX);
}

static void dpu_hw_ctl_update_pending_flush_periph_v1(struct dpu_hw_ctl *ctx,
						      enum dpu_intf intf)
{
	ctx->pending_periph_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(PERIPH_IDX);
}

static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
		enum dpu_merge_3d merge_3d)
{
	ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}

static void dpu_hw_ctl_update_pending_flush_dsc_v1(struct dpu_hw_ctl *ctx,
						   enum dpu_dsc dsc_num)
{
	ctx->pending_dsc_flush_mask |= BIT(dsc_num - DSC_0);
	ctx->pending_flush_mask |= BIT(DSC_IDX);
}

static void dpu_hw_ctl_update_pending_flush_cdm_v1(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num)
{
	ctx->pending_cdm_flush_mask |= BIT(cdm_num - CDM_0);
	ctx->pending_flush_mask |= BIT(CDM_IDX);
}

static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
	enum dpu_dspp dspp, u32 dspp_sub_blk)
{
	switch (dspp) {
	case DSPP_0:
		ctx->pending_flush_mask |= BIT(13);
		break;
	case DSPP_1:
		ctx->pending_flush_mask |= BIT(14);
		break;
	case DSPP_2:
		ctx->pending_flush_mask |= BIT(15);
		break;
	case DSPP_3:
		ctx->pending_flush_mask |= BIT(21);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_dspp_sub_blocks(
	struct dpu_hw_ctl *ctx,	enum dpu_dspp dspp, u32 dspp_sub_blk)
{
	if (dspp >= DSPP_MAX)
		return;

	switch (dspp_sub_blk) {
	case DPU_DSPP_PCC:
		ctx->pending_dspp_flush_mask[dspp - DSPP_0] |= BIT(4);
		break;
	default:
		return;
	}

	ctx->pending_flush_mask |= BIT(DSPP_IDX);
}

static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * It takes around 30 us for the MDP to finish resetting its CTL path,
	 * so sleep 20-50 us between polls; the reset should normally be
	 * complete by the first re-read.
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;

		DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
	}

	DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
}

struct ctl_blend_config {
	int idx, shift, ext_shift;
};

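/*
 * Per-SSPP blend-stage field layout: idx selects the mixercfg word
 * (0 = CTL_LAYER, 1 = CTL_LAYER_EXT, 2 = CTL_LAYER_EXT2, ...), shift is the
 * field's bit offset, and ext_shift is the overflow bit placed in
 * CTL_LAYER_EXT for the 3-bit CTL_LAYER fields. The second entry of each
 * pair is used when the pipe is staged as DPU_SSPP_RECT_1 of a multirect
 * pair; idx == -1 marks combinations that are skipped.
 */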
static const struct ctl_blend_config ctl_blend_config[][2] = {
	[SSPP_NONE] = { { -1 }, { -1 } },
	[SSPP_MAX] =  { { -1 }, { -1 } },
	[SSPP_VIG0] = { { 0, 0,  0  }, { 3, 0 } },
	[SSPP_VIG1] = { { 0, 3,  2  }, { 3, 4 } },
	[SSPP_VIG2] = { { 0, 6,  4  }, { 3, 8 } },
	[SSPP_VIG3] = { { 0, 26, 6  }, { 3, 12 } },
	[SSPP_RGB0] = { { 0, 9,  8  }, { -1 } },
	[SSPP_RGB1] = { { 0, 12, 10 }, { -1 } },
	[SSPP_RGB2] = { { 0, 15, 12 }, { -1 } },
	[SSPP_RGB3] = { { 0, 29, 14 }, { -1 } },
	[SSPP_DMA0] = { { 0, 18, 16 }, { 2, 8 } },
	[SSPP_DMA1] = { { 0, 21, 18 }, { 2, 12 } },
	[SSPP_DMA2] = { { 2, 0      }, { 2, 16 } },
	[SSPP_DMA3] = { { 2, 4      }, { 2, 20 } },
	[SSPP_DMA4] = { { 4, 0      }, { 4, 8 } },
	[SSPP_DMA5] = { { 4, 4      }, { 4, 12 } },
	[SSPP_CURSOR0] =  { { 1, 20 }, { -1 } },
	[SSPP_CURSOR1] =  { { 1, 26 }, { -1 } },
};

static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mix, ext, mix_ext;
	u32 mixercfg[5] = { 0 };
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg[0] = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;
		mix_ext = (i + 1) & 0xf;

		for (j = 0 ; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];
			enum dpu_sspp pipe = stage_cfg->stage[i][j];
			const struct ctl_blend_config *cfg =
				&ctl_blend_config[pipe][rect_index == DPU_SSPP_RECT_1];

			/*
			 * CTL_LAYER has 3-bit fields (plus an extra bit in the
			 * EXT register); all EXT registers have 4-bit fields.
			 */
			if (cfg->idx == -1) {
				continue;
			} else if (cfg->idx == 0) {
				mixercfg[0] |= mix << cfg->shift;
				mixercfg[1] |= ext << cfg->ext_shift;
			} else {
				mixercfg[cfg->idx] |= mix_ext << cfg->shift;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg[0]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg[1]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg[2]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg[3]);
	if ((test_bit(DPU_CTL_HAS_LAYER_EXT4, &ctx->caps->features)))
		DPU_REG_WRITE(c, CTL_LAYER_EXT4(lm), mixercfg[4]);
}

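/*
 * Active-CTL interface configuration: the ACTIVE registers are read back and
 * OR-ed with the newly enabled blocks, so a single CTL can drive several
 * interfaces/writebacks without clobbering a previously configured path.
 */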
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 dsc_active = 0;
	u32 wb_active = 0;
	u32 mode_sel = 0;

	/* CTL_TOP[31:28] carries the group_id used to collate CTL paths
	 * per VM. Explicitly disable it until VM support is added in SW;
	 * the power-on reset value is not the disabled state.
	 */
	if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
		mode_sel = CTL_DEFAULT_GROUP_ID << 28;

	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);

	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
	wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
	dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);

	if (cfg->intf)
		intf_active |= BIT(cfg->intf - INTF_0);

	if (cfg->wb)
		wb_active |= BIT(cfg->wb - WB_0);

	if (cfg->dsc)
		dsc_active |= cfg->dsc;

	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
	DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);

	if (cfg->merge_3d)
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
			      BIT(cfg->merge_3d - MERGE_3D_0));

	if (cfg->cdm)
		DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm);
}

static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	if (cfg->wb)
		intf_cfg |= (cfg->wb & 0x3) + 2;

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}

static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 wb_active = 0;
	u32 merge3d_active = 0;
	u32 dsc_active;
	u32 cdm_active;

	/*
	 * This API resets each portion of the CTL path: it clears the sspps
	 * staged on the lm and disables the merge_3d, interface, writeback,
	 * DSC and CDM blocks, to ensure a clean teardown of the pipeline.
	 * It is used for writeback to begin with, to properly tear down the
	 * writeback session, but upon further validation it can be extended
	 * to all interfaces.
	 */
	if (cfg->merge_3d) {
		merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
		merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
				merge3d_active);
	}

	dpu_hw_ctl_clear_all_blendstages(ctx);

	if (cfg->intf) {
		intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
		intf_active &= ~BIT(cfg->intf - INTF_0);
		DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	}

	if (cfg->wb) {
		wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
		wb_active &= ~BIT(cfg->wb - WB_0);
		DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
	}

	if (cfg->dsc) {
		dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
		dsc_active &= ~cfg->dsc;
		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
	}

	if (cfg->cdm) {
		cdm_active = DPU_REG_READ(c, CTL_CDM_ACTIVE);
		cdm_active &= ~cfg->cdm;
		DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cdm_active);
	}
}

static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
	unsigned long *fetch_active)
{
	int i;
	u32 val = 0;

	if (fetch_active) {
		for (i = 0; i < SSPP_MAX; i++) {
			if (test_bit(i, fetch_active) &&
				fetch_tbl[i] != CTL_INVALID_BIT)
				val |= BIT(fetch_tbl[i]);
		}
	}

	DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}

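/*
 * Populate the ops table: DPU_CTL_ACTIVE_CFG selects the active-CTL ("v1")
 * flush and interface-configuration callbacks, otherwise the legacy
 * CTL_TOP/CTL_FLUSH programming model is used.
 */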
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
		unsigned long cap)
{
	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
		ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf_v1;

		ops->update_pending_flush_periph =
			dpu_hw_ctl_update_pending_flush_periph_v1;

		ops->update_pending_flush_merge_3d =
			dpu_hw_ctl_update_pending_flush_merge_3d_v1;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
		ops->update_pending_flush_dsc =
			dpu_hw_ctl_update_pending_flush_dsc_v1;
		ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
	} else {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
		ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm;
	}
	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
	ops->trigger_start = dpu_hw_ctl_trigger_start;
	ops->is_started = dpu_hw_ctl_is_started;
	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
	ops->reset = dpu_hw_ctl_reset_control;
	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
	ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
	ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
	if (cap & BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp_sub_blocks;
	else
		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;

	if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
		ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
}

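/*
 * Allocate a DRM-managed dpu_hw_ctl for the given CTL block, map its
 * registers at addr + cfg->base, and wire up the ops according to the
 * features advertised in cfg.
 */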
struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
				   const struct dpu_ctl_cfg *cfg,
				   void __iomem *addr,
				   u32 mixer_count,
				   const struct dpu_lm_cfg *mixer)
{
	struct dpu_hw_ctl *c;

	c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	c->hw.blk_addr = addr + cfg->base;
	c->hw.log_mask = DPU_DBG_MASK_CTL;

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = cfg->id;
	c->mixer_count = mixer_count;
	c->mixer_hw_caps = mixer;

	return c;
}