// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitfield.h>

#include <drm/drm_managed.h>

#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_top.h"
#include "dpu_kms.h"

#define FLD_SPLIT_DISPLAY_CMD             BIT(1)
#define FLD_SMART_PANEL_FREE_RUN          BIT(2)
#define FLD_INTF_1_SW_TRG_MUX             BIT(4)
#define FLD_INTF_2_SW_TRG_MUX             BIT(8)
#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF

#define TRAFFIC_SHAPER_EN                 BIT(31)
#define TRAFFIC_SHAPER_RD_CLIENT(num)     (0x030 + (num * 4))
#define TRAFFIC_SHAPER_WR_CLIENT(num)     (0x060 + (num * 4))
#define TRAFFIC_SHAPER_FIXPOINT_FACTOR    4

#define MDP_TICK_COUNT                    16
#define XO_CLK_RATE                       19200
#define MS_TICKS_IN_SEC                   1000

#define CALCULATE_WD_LOAD_VALUE(fps) \
	((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE)/(MDP_TICK_COUNT * fps)))
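/*
 * Illustrative arithmetic only: XO_CLK_RATE is the 19.2 MHz crystal
 * expressed in kHz, so a 60 fps request loads
 * (1000 * 19200) / (16 * 60) = 20000 ticks into the watchdog timer.
 */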

static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
		struct split_pipe_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c;
	u32 upper_pipe = 0;
	u32 lower_pipe = 0;

	if (!mdp || !cfg)
		return;

	c = &mdp->hw;

	if (cfg->en) {
		if (cfg->mode == INTF_MODE_CMD) {
			lower_pipe = FLD_SPLIT_DISPLAY_CMD;
			/* interface controlling sw trigger */
			if (cfg->intf == INTF_2)
				lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
			else
				lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
			upper_pipe = lower_pipe;
		} else {
			if (cfg->intf == INTF_2) {
				lower_pipe = FLD_INTF_1_SW_TRG_MUX;
				upper_pipe = FLD_INTF_2_SW_TRG_MUX;
			} else {
				lower_pipe = FLD_INTF_2_SW_TRG_MUX;
				upper_pipe = FLD_INTF_1_SW_TRG_MUX;
			}
		}
	}

	DPU_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
	DPU_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
	DPU_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
	DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
}

static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
		enum dpu_clk_ctrl_type clk_ctrl, bool enable)
{
	if (!mdp)
		return false;

	if (clk_ctrl <= DPU_CLK_CTRL_NONE || clk_ctrl >= DPU_CLK_CTRL_MAX)
		return false;

	return dpu_hw_clk_force_ctrl(&mdp->hw, &mdp->caps->clk_ctrls[clk_ctrl], enable);
}

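/* Decode the per-client 2-bit danger level fields from DANGER_STATUS. */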
static void dpu_hw_get_danger_status(struct dpu_hw_mdp *mdp,
		struct dpu_danger_safe_status *status)
{
	struct dpu_hw_blk_reg_map *c;
	u32 value;

	if (!mdp || !status)
		return;

	c = &mdp->hw;

	value = DPU_REG_READ(c, DANGER_STATUS);
	status->mdp = (value >> 0) & 0x3;
	status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
	status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
	status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
	status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
	status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
	status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
	status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
	status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
	status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
	status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
	status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
	status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
	status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
	status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
}

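/*
 * Program one of the MDP watchdog timers as a software vsync source: load
 * a tick count derived from cfg->frame_rate, clear the timer, then enable
 * it together with its heartbeat.
 */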
static void dpu_hw_setup_wd_timer(struct dpu_hw_mdp *mdp,
				  struct dpu_vsync_source_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c;
	u32 reg, wd_load_value, wd_ctl, wd_ctl2;

	if (!mdp || !cfg)
		return;

	c = &mdp->hw;

	if (cfg->vsync_source >= DPU_VSYNC_SOURCE_WD_TIMER_4 &&
			cfg->vsync_source <= DPU_VSYNC_SOURCE_WD_TIMER_0) {
		switch (cfg->vsync_source) {
		case DPU_VSYNC_SOURCE_WD_TIMER_4:
			wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_4_CTL;
			wd_ctl2 = MDP_WD_TIMER_4_CTL2;
			break;
		case DPU_VSYNC_SOURCE_WD_TIMER_3:
			wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_3_CTL;
			wd_ctl2 = MDP_WD_TIMER_3_CTL2;
			break;
		case DPU_VSYNC_SOURCE_WD_TIMER_2:
			wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_2_CTL;
			wd_ctl2 = MDP_WD_TIMER_2_CTL2;
			break;
		case DPU_VSYNC_SOURCE_WD_TIMER_1:
			wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_1_CTL;
			wd_ctl2 = MDP_WD_TIMER_1_CTL2;
			break;
		case DPU_VSYNC_SOURCE_WD_TIMER_0:
		default:
			wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
			wd_ctl = MDP_WD_TIMER_0_CTL;
			wd_ctl2 = MDP_WD_TIMER_0_CTL2;
			break;
		}

		DPU_REG_WRITE(c, wd_load_value,
			CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));

		DPU_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
		reg = DPU_REG_READ(c, wd_ctl2);
		reg |= BIT(8);		/* enable heartbeat timer */
		reg |= BIT(0);		/* enable WD timer */
		DPU_REG_WRITE(c, wd_ctl2, reg);

		/* make sure that timers are enabled/disabled for vsync state */
		wmb();
	}
}

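/*
 * Route each requested ping-pong to the selected vsync source through the
 * MDP_VSYNC_SEL mux, then let dpu_hw_setup_wd_timer() program the watchdog
 * timer if one was chosen as the source.
 */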
static void dpu_hw_setup_vsync_sel(struct dpu_hw_mdp *mdp,
				   struct dpu_vsync_source_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c;
	u32 reg, i;
	static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};

	if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
		return;

	c = &mdp->hw;

	reg = DPU_REG_READ(c, MDP_VSYNC_SEL);
	for (i = 0; i < cfg->pp_count; i++) {
		int pp_idx = cfg->ppnumber[i] - PINGPONG_0;

		if (pp_idx >= ARRAY_SIZE(pp_offset))
			continue;

		reg &= ~(0xf << pp_offset[pp_idx]);
		reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
	}
	DPU_REG_WRITE(c, MDP_VSYNC_SEL, reg);

	dpu_hw_setup_wd_timer(mdp, cfg);
}

static void dpu_hw_get_safe_status(struct dpu_hw_mdp *mdp,
		struct dpu_danger_safe_status *status)
{
	struct dpu_hw_blk_reg_map *c;
	u32 value;

	if (!mdp || !status)
		return;

	c = &mdp->hw;

	value = DPU_REG_READ(c, SAFE_STATUS);
	status->mdp = (value >> 0) & 0x1;
	status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
	status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
	status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
	status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
	status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
	status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
	status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
	status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
	status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
	status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
	status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
	status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
	status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
	status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
}

static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
{
	struct dpu_hw_blk_reg_map *c;

	if (!mdp)
		return;

	c = &mdp->hw;

	DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
}

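/*
 * Program the DP controller to PHY crossbar: the INTF0/INTF1 fields carry
 * the PHY selected for each interface, and each used PHY field records
 * which interface (1-based) drives it.
 */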
static void dpu_hw_dp_phy_intf_sel(struct dpu_hw_mdp *mdp,
				   enum dpu_dp_phy_sel phys[2])
{
	struct dpu_hw_blk_reg_map *c = &mdp->hw;
	unsigned int intf;
	u32 sel = 0;

	sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_INTF0, phys[0]);
	sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_INTF1, phys[1]);

	for (intf = 0; intf < 2; intf++) {
		switch (phys[intf]) {
		case DPU_DP_PHY_0:
			sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY0, intf + 1);
			break;
		case DPU_DP_PHY_1:
			sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY1, intf + 1);
			break;
		case DPU_DP_PHY_2:
			sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY2, intf + 1);
			break;
		default:
			/* ignore */
			break;
		}
	}

	DPU_REG_WRITE(c, MDP_DP_PHY_INTF_SEL, sel);
}

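/*
 * Populate the ops table; optional hooks depend on the MDP feature bits
 * from the catalog and on the DPU core major revision.
 */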
static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
		unsigned long cap, const struct dpu_mdss_version *mdss_rev)
{
	ops->setup_split_pipe = dpu_hw_setup_split_pipe;
	ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
	ops->get_danger_status = dpu_hw_get_danger_status;

	if (cap & BIT(DPU_MDP_VSYNC_SEL))
		ops->setup_vsync_source = dpu_hw_setup_vsync_sel;
	else
		ops->setup_vsync_source = dpu_hw_setup_wd_timer;

	ops->get_safe_status = dpu_hw_get_safe_status;

	if (mdss_rev->core_major_ver >= 5)
		ops->dp_phy_intf_sel = dpu_hw_dp_phy_intf_sel;

	if (cap & BIT(DPU_MDP_AUDIO_SELECT))
		ops->intf_audio_select = dpu_hw_intf_audio_select;
}

/**
 * dpu_hw_mdptop_init - initializes the top driver for the passed config
 * @dev:  Corresponding device for devres management
 * @cfg:  MDP TOP configuration from catalog
 * @addr: Mapped register io address of MDP
 * @mdss_rev: dpu core's major and minor versions
 */
struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
				      const struct dpu_mdp_cfg *cfg,
				      void __iomem *addr,
				      const struct dpu_mdss_version *mdss_rev)
{
	struct dpu_hw_mdp *mdp;

	if (!addr)
		return ERR_PTR(-EINVAL);

	mdp = drmm_kzalloc(dev, sizeof(*mdp), GFP_KERNEL);
	if (!mdp)
		return ERR_PTR(-ENOMEM);

	mdp->hw.blk_addr = addr + cfg->base;
	mdp->hw.log_mask = DPU_DBG_MASK_TOP;

	/*
	 * Assign ops
	 */
	mdp->caps = cfg;
	_setup_mdp_ops(&mdp->ops, mdp->caps->features, mdss_rev);

	return mdp;
}
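/*
 * Illustrative caller sketch, not part of this file: the struct and field
 * names below follow dpu_kms and may differ between kernel versions. The
 * KMS layer is expected to do roughly
 *
 *	dpu_kms->hw_mdp = dpu_hw_mdptop_init(dev, dpu_kms->catalog->mdp,
 *					     dpu_kms->mmio,
 *					     dpu_kms->catalog->mdss_ver);
 *	if (IS_ERR(dpu_kms->hw_mdp))
 *		return PTR_ERR(dpu_kms->hw_mdp);
 *
 * and then invoke the hooks populated in hw_mdp->ops. Because the object
 * is drmm-allocated, no explicit destroy call is needed on teardown.
 */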
319