xref: /linux/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2015, The Linux Foundation. All rights reserved.
4  */
5 
6 #include <dt-bindings/clock/qcom,dsi-phy-28nm.h>
7 #include <linux/clk.h>
8 #include <linux/clk-provider.h>
9 
10 #include "dsi_phy.h"
11 #include "dsi.xml.h"
12 #include "dsi_phy_28nm.xml.h"
13 
14 /*
15  * DSI PLL 28nm - clock diagram (e.g. DSI0):
16  *
17  *         dsi0analog_postdiv_clk
18  *                             |         dsi0indirect_path_div2_clk
19  *                             |          |
20  *                   +------+  |  +----+  |  |\   dsi0byte_mux
21  *  dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \   |
22  *                |  +------+     +----+     | m|  |  +----+
23  *                |                          | u|--o--| /4 |-- dsi0pllbyte
24  *                |                          | x|     +----+
25  *                o--------------------------| /
26  *                |                          |/
27  *                |          +------+
28  *                o----------| DIV3 |------------------------- dsi0pll
29  *                           +------+
30  */
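/*
 * Illustrative example (assumed divider settings, not taken from hardware):
 * with dsi0vco_clk at 750 MHz, DIV1 = 1 and the byte mux selecting the
 * indirect path, dsi0pllbyte = 750 MHz / 1 / 2 / 4 = 93.75 MHz; with
 * DIV3 = 2 the dsi0pll output is 750 MHz / 2 = 375 MHz.
 */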
31 
32 #define POLL_MAX_READS			10
33 #define POLL_TIMEOUT_US		50
34 
35 #define VCO_REF_CLK_RATE		19200000
36 #define VCO_MIN_RATE			350000000
37 #define VCO_MAX_RATE			750000000
38 
39 /* v2.0.0 28nm LP implementation */
40 #define DSI_PHY_28NM_QUIRK_PHY_LP	BIT(0)
41 #define DSI_PHY_28NM_QUIRK_PHY_8226	BIT(1)	/* 8226-specific PLL power-up sequence */
42 
43 #define LPFR_LUT_SIZE			10
44 struct lpfr_cfg {
45 	unsigned long vco_rate;
46 	u32 resistance;
47 };
48 
49 /* Loop filter resistance: the first entry with vco_rate >= the requested rate is used */
50 static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
51 	{ 479500000,  8 },
52 	{ 480000000, 11 },
53 	{ 575500000,  8 },
54 	{ 576000000, 12 },
55 	{ 610500000,  8 },
56 	{ 659500000,  9 },
57 	{ 671500000, 10 },
58 	{ 672000000, 14 },
59 	{ 708500000, 10 },
60 	{ 750000000, 11 },
61 };
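/*
 * For example, a 560 MHz VCO target falls through to the 575500000 entry
 * above and programs a loop filter resistance value of 8.
 */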
62 
63 struct pll_28nm_cached_state {
64 	unsigned long vco_rate;
65 	u8 postdiv3;
66 	u8 postdiv1;
67 	u8 byte_mux;
68 };
69 
70 struct dsi_pll_28nm {
71 	struct clk_hw clk_hw;
72 
73 	struct msm_dsi_phy *phy;
74 
75 	struct pll_28nm_cached_state cached_state;
76 };
77 
78 #define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, clk_hw)
79 
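/*
 * Poll PLL_STATUS for the PLL_RDY bit, reading up to nb_tries times and
 * waiting timeout_us microseconds after each unsuccessful read. Returns
 * true as soon as the PLL reports lock.
 */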
80 static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
81 				u32 nb_tries, u32 timeout_us)
82 {
83 	bool pll_locked = false;
84 	u32 val;
85 
86 	while (nb_tries--) {
87 		val = readl(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_STATUS);
88 		pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);
89 
90 		if (pll_locked)
91 			break;
92 
93 		udelay(timeout_us);
94 	}
95 	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
96 
97 	return pll_locked;
98 }
99 
100 static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
101 {
102 	void __iomem *base = pll_28nm->phy->pll_base;
103 
104 	/*
105 	 * Add the HW recommended delays after asserting and then
106 	 * de-asserting the PLL software reset bit.
107 	 */
108 	writel(DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, base + REG_DSI_28nm_PHY_PLL_TEST_CFG);
109 	udelay(1);
110 	writel(0, base + REG_DSI_28nm_PHY_PLL_TEST_CFG);
111 	udelay(1);
112 }
113 
114 /*
115  * Clock Callbacks
116  */
117 static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
118 		unsigned long parent_rate)
119 {
120 	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
121 	struct device *dev = &pll_28nm->phy->pdev->dev;
122 	void __iomem *base = pll_28nm->phy->pll_base;
123 	unsigned long div_fbx1000, gen_vco_clk;
124 	u32 refclk_cfg, frac_n_mode, frac_n_value;
125 	u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
126 	u32 cal_cfg10, cal_cfg11;
127 	u32 rem;
128 	int i;
129 
130 	VERB("rate=%lu, parent_rate=%lu", rate, parent_rate);
131 
132 	/* Force postdiv2 to be div-4 */
133 	writel(3, base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG);
134 
135 	/* Configure the Loop filter resistance */
136 	for (i = 0; i < LPFR_LUT_SIZE; i++)
137 		if (rate <= lpfr_lut[i].vco_rate)
138 			break;
139 	if (i == LPFR_LUT_SIZE) {
140 		DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
141 				rate);
142 		return -EINVAL;
143 	}
144 	writel(lpfr_lut[i].resistance, base + REG_DSI_28nm_PHY_PLL_LPFR_CFG);
145 
146 	/* Loop filter capacitance values: c1 and c2 */
147 	writel(0x70, base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG);
148 	writel(0x15, base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG);
149 
150 	rem = rate % VCO_REF_CLK_RATE;
151 	if (rem) {
152 		refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
153 		frac_n_mode = 1;
154 		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
155 		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
156 	} else {
157 		refclk_cfg = 0x0;
158 		frac_n_mode = 0;
159 		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
160 		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
161 	}
162 
163 	DBG("refclk_cfg = %d", refclk_cfg);
164 
165 	rem = div_fbx1000 % 1000;
166 	frac_n_value = (rem << 16) / 1000;
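	/*
	 * Worked example with illustrative numbers: rate = 500000000 is not
	 * an integer multiple of the 19.2 MHz reference, so fractional mode
	 * is used with the ref doubler enabled. div_fbx1000 =
	 * 500000000 / 38400 = 13020, i.e. a feedback divider of ~13.020
	 * against the doubled 38.4 MHz reference, gen_vco_clk =
	 * 13020 * 38400 = 499968000 and
	 * frac_n_value = ((13020 % 1000) << 16) / 1000 = 1310.
	 */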
167 
168 	DBG("div_fb = %lu", div_fbx1000);
169 	DBG("frac_n_value = %d", frac_n_value);
170 
171 	DBG("Generated VCO Clock: %lu", gen_vco_clk);
172 	rem = 0;
173 	sdm_cfg1 = readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
174 	sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
175 	if (frac_n_mode) {
176 		sdm_cfg0 = 0x0;
177 		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
178 		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
179 				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
180 		sdm_cfg3 = frac_n_value >> 8;
181 		sdm_cfg2 = frac_n_value & 0xff;
182 	} else {
183 		sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
184 		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
185 				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
186 		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
187 		sdm_cfg2 = 0;
188 		sdm_cfg3 = 0;
189 	}
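	/*
	 * Continuing the 500 MHz example: DC_OFFSET = 13020 / 1000 - 1 = 12,
	 * and the 16-bit seed 1310 is split into SDM_CFG3 = 1310 >> 8 = 5
	 * and SDM_CFG2 = 1310 & 0xff = 30.
	 */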
190 
191 	DBG("sdm_cfg0=%d", sdm_cfg0);
192 	DBG("sdm_cfg1=%d", sdm_cfg1);
193 	DBG("sdm_cfg2=%d", sdm_cfg2);
194 	DBG("sdm_cfg3=%d", sdm_cfg3);
195 
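	/*
	 * CAL_CFG11/CAL_CFG10 encode the generated VCO frequency in MHz as
	 * cal_cfg11 * 256 + cal_cfg10 (the 499968000 Hz example above gives
	 * cal_cfg11 = 1 and cal_cfg10 = 243).
	 */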
196 	cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
197 	cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
198 	DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);
199 
200 	writel(0x02, base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG);
201 	writel(0x2b, base + REG_DSI_28nm_PHY_PLL_CAL_CFG3);
202 	writel(0x06, base + REG_DSI_28nm_PHY_PLL_CAL_CFG4);
203 	writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
204 
205 	writel(sdm_cfg1, base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
206 	writel(DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2),
207 	       base + REG_DSI_28nm_PHY_PLL_SDM_CFG2);
208 	writel(DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3),
209 	       base + REG_DSI_28nm_PHY_PLL_SDM_CFG3);
210 	writel(0, base + REG_DSI_28nm_PHY_PLL_SDM_CFG4);
211 
212 	/* Add hardware recommended delay for correct PLL configuration */
213 	if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
214 		udelay(1000);
215 	else
216 		udelay(1);
217 
218 	writel(refclk_cfg, base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG);
219 	writel(0x00, base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG);
220 	writel(0x31, base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG);
221 	writel(sdm_cfg0, base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
222 	writel(0x12, base + REG_DSI_28nm_PHY_PLL_CAL_CFG0);
223 	writel(0x30, base + REG_DSI_28nm_PHY_PLL_CAL_CFG6);
224 	writel(0x00, base + REG_DSI_28nm_PHY_PLL_CAL_CFG7);
225 	writel(0x60, base + REG_DSI_28nm_PHY_PLL_CAL_CFG8);
226 	writel(0x00, base + REG_DSI_28nm_PHY_PLL_CAL_CFG9);
227 	writel(cal_cfg10 & 0xff, base + REG_DSI_28nm_PHY_PLL_CAL_CFG10);
228 	writel(cal_cfg11 & 0xff, base + REG_DSI_28nm_PHY_PLL_CAL_CFG11);
229 	writel(0x20, base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG);
230 
231 	return 0;
232 }
233 
234 static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
235 {
236 	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
237 
238 	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
239 					POLL_TIMEOUT_US);
240 }
241 
242 static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
243 		unsigned long parent_rate)
244 {
245 	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
246 	void __iomem *base = pll_28nm->phy->pll_base;
247 	u32 sdm0, doubler, sdm_byp_div;
248 	u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
249 	u32 ref_clk = VCO_REF_CLK_RATE;
250 	unsigned long vco_rate;
251 
252 	VERB("parent_rate=%lu", parent_rate);
253 
254 	/* Check to see if the ref clk doubler is enabled */
255 	doubler = readl(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
256 			DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
257 	ref_clk += (doubler * VCO_REF_CLK_RATE);
258 
259 	/* see if it is integer mode or sdm mode */
260 	sdm0 = readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
261 	if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
262 		/* integer mode */
263 		sdm_byp_div = FIELD(
264 				readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
265 				DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
266 		vco_rate = ref_clk * sdm_byp_div;
267 	} else {
268 		/* sdm mode */
269 		sdm_dc_off = FIELD(
270 				readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
271 				DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
272 		DBG("sdm_dc_off = %d", sdm_dc_off);
273 		sdm2 = FIELD(readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
274 				DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
275 		sdm3 = FIELD(readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
276 				DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
277 		sdm_freq_seed = (sdm3 << 8) | sdm2;
278 		DBG("sdm_freq_seed = %d", sdm_freq_seed);
279 
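		/*
		 * vco = ref * (DC_OFFSET + 1) + ref * freq_seed / 2^16,
		 * i.e. the inverse of the encoding performed in
		 * dsi_pll_28nm_clk_set_rate().
		 */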
280 		vco_rate = (ref_clk * (sdm_dc_off + 1)) +
281 			mult_frac(ref_clk, sdm_freq_seed, BIT(16));
282 		DBG("vco rate = %lu", vco_rate);
283 	}
284 
285 	DBG("returning vco rate = %lu", vco_rate);
286 
287 	return vco_rate;
288 }
289 
290 static int _dsi_pll_28nm_vco_prepare_hpm(struct dsi_pll_28nm *pll_28nm)
291 {
292 	struct device *dev = &pll_28nm->phy->pdev->dev;
293 	void __iomem *base = pll_28nm->phy->pll_base;
294 	u32 max_reads = 5, timeout_us = 100;
295 	bool locked;
296 	u32 val;
297 	int i;
298 
299 	DBG("id=%d", pll_28nm->phy->id);
300 
301 	pll_28nm_software_reset(pll_28nm);
302 
303 	/*
304 	 * PLL power up sequence.
305 	 * Add necessary delays recommended by hardware.
306 	 */
307 	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
308 	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
309 	udelay(1);
310 
311 	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
312 	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
313 	udelay(200);
314 
315 	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
316 	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
317 	udelay(500);
318 
319 	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
320 	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
321 	udelay(600);
322 
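	/*
	 * Toggle the lock detect and poll for lock. If the PLL does not
	 * lock, reset it, rerun the power-up sequence (this time with an
	 * extra LDO power cycle) and retry once.
	 */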
323 	for (i = 0; i < 2; i++) {
324 		/* DSI Uniphy lock detect setting */
325 		writel(0x0c, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
326 		udelay(100);
327 		writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
328 
329 		/* poll for PLL ready status */
330 		locked = pll_28nm_poll_for_ready(pll_28nm, max_reads,
331 						 timeout_us);
332 		if (locked)
333 			break;
334 
335 		pll_28nm_software_reset(pll_28nm);
336 
337 		/*
338 		 * PLL power up sequence.
339 		 * Add necessary delays recommended by hardware.
340 		 */
341 		val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
342 		writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
343 		udelay(1);
344 
345 		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
346 		writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
347 		udelay(200);
348 
349 		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
350 		writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
351 		udelay(250);
352 
353 		val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
354 		writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
355 		udelay(200);
356 
357 		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
358 		writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
359 		udelay(500);
360 
361 		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
362 		writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
363 		udelay(600);
364 	}
365 
366 	if (unlikely(!locked))
367 		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
368 	else
369 		DBG("DSI PLL Lock success");
370 
371 	return locked ? 0 : -EINVAL;
372 }
373 
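/*
 * The HPM PLL may fail to lock on the first attempt, so the full power-up
 * sequence above is retried up to three times before giving up.
 */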
374 static int dsi_pll_28nm_vco_prepare_hpm(struct clk_hw *hw)
375 {
376 	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
377 	int i, ret;
378 
379 	if (unlikely(pll_28nm->phy->pll_on))
380 		return 0;
381 
382 	for (i = 0; i < 3; i++) {
383 		ret = _dsi_pll_28nm_vco_prepare_hpm(pll_28nm);
384 		if (!ret) {
385 			pll_28nm->phy->pll_on = true;
386 			return 0;
387 		}
388 	}
389 
390 	return ret;
391 }
392 
393 static int dsi_pll_28nm_vco_prepare_8226(struct clk_hw *hw)
394 {
395 	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
396 	struct device *dev = &pll_28nm->phy->pdev->dev;
397 	void __iomem *base = pll_28nm->phy->pll_base;
398 	u32 max_reads = 5, timeout_us = 100;
399 	bool locked;
400 	u32 val;
401 	int i;
402 
403 	DBG("id=%d", pll_28nm->phy->id);
404 
405 	pll_28nm_software_reset(pll_28nm);
406 
407 	/*
408 	 * PLL power up sequence.
409 	 * Add necessary delays recommended by hardware.
410 	 */
411 	writel(0x34, base + REG_DSI_28nm_PHY_PLL_CAL_CFG1);
412 
413 	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
414 	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
415 	udelay(200);
416 
417 	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
418 	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
419 	udelay(200);
420 
421 	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
422 	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
423 	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
424 	udelay(600);
425 
426 	for (i = 0; i < 7; i++) {
427 		/* DSI Uniphy lock detect setting */
428 		writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
429 		writel(0x0c, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
430 		udelay(100);
431 		writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
432 
433 		/* poll for PLL ready status */
434 		locked = pll_28nm_poll_for_ready(pll_28nm,
435 						max_reads, timeout_us);
436 		if (locked)
437 			break;
438 
439 		pll_28nm_software_reset(pll_28nm);
440 
441 		/*
442 		 * PLL power up sequence.
443 		 * Add necessary delays recommended by hardware.
444 		 */
445 		writel(0x00, base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG);
446 		udelay(50);
447 
448 		val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
449 		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
450 		writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
451 		udelay(100);
452 
453 		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
454 		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
455 		writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
456 		udelay(600);
457 	}
458 
459 	if (unlikely(!locked))
460 		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
461 	else
462 		DBG("DSI PLL Lock success");
463 
464 	return locked ? 0 : -EINVAL;
465 }
466 
467 static int dsi_pll_28nm_vco_prepare_lp(struct clk_hw *hw)
468 {
469 	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
470 	struct device *dev = &pll_28nm->phy->pdev->dev;
471 	void __iomem *base = pll_28nm->phy->pll_base;
472 	bool locked;
473 	u32 max_reads = 10, timeout_us = 50;
474 	u32 val;
475 
476 	DBG("id=%d", pll_28nm->phy->id);
477 
478 	if (unlikely(pll_28nm->phy->pll_on))
479 		return 0;
480 
481 	pll_28nm_software_reset(pll_28nm);
482 
483 	/*
484 	 * PLL power up sequence.
485 	 * Add necessary delays recommended by hardware.
486 	 */
487 	writel(0x34, base + REG_DSI_28nm_PHY_PLL_CAL_CFG1);
488 	ndelay(500);
489 
490 	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
491 	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
492 	ndelay(500);
493 
494 	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
495 	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
496 	ndelay(500);
497 
498 	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
499 		DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
500 	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
501 	ndelay(500);
502 
503 	/* DSI PLL toggle lock detect setting */
504 	writel(0x04, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
505 	ndelay(500);
506 	writel(0x05, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
507 	udelay(512);
508 
509 	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
510 
511 	if (unlikely(!locked)) {
512 		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
513 		return -EINVAL;
514 	}
515 
516 	DBG("DSI PLL lock success");
517 	pll_28nm->phy->pll_on = true;
518 
519 	return 0;
520 }
521 
522 static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
523 {
524 	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
525 
526 	DBG("id=%d", pll_28nm->phy->id);
527 
528 	if (unlikely(!pll_28nm->phy->pll_on))
529 		return;
530 
531 	writel(0, pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
532 
533 	pll_28nm->phy->pll_on = false;
534 }
535 
536 static int dsi_pll_28nm_clk_determine_rate(struct clk_hw *hw,
537 					   struct clk_rate_request *req)
538 {
539 	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
540 
541 	req->rate = clamp_t(unsigned long, req->rate,
542 			    pll_28nm->phy->cfg->min_pll_rate,
543 			    pll_28nm->phy->cfg->max_pll_rate);
544 
545 	return 0;
546 }
547 
548 static const struct clk_ops clk_ops_dsi_pll_28nm_vco_hpm = {
549 	.determine_rate = dsi_pll_28nm_clk_determine_rate,
550 	.set_rate = dsi_pll_28nm_clk_set_rate,
551 	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
552 	.prepare = dsi_pll_28nm_vco_prepare_hpm,
553 	.unprepare = dsi_pll_28nm_vco_unprepare,
554 	.is_enabled = dsi_pll_28nm_clk_is_enabled,
555 };
556 
557 static const struct clk_ops clk_ops_dsi_pll_28nm_vco_lp = {
558 	.determine_rate = dsi_pll_28nm_clk_determine_rate,
559 	.set_rate = dsi_pll_28nm_clk_set_rate,
560 	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
561 	.prepare = dsi_pll_28nm_vco_prepare_lp,
562 	.unprepare = dsi_pll_28nm_vco_unprepare,
563 	.is_enabled = dsi_pll_28nm_clk_is_enabled,
564 };
565 
566 static const struct clk_ops clk_ops_dsi_pll_28nm_vco_8226 = {
567 	.determine_rate = dsi_pll_28nm_clk_determine_rate,
568 	.set_rate = dsi_pll_28nm_clk_set_rate,
569 	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
570 	.prepare = dsi_pll_28nm_vco_prepare_8226,
571 	.unprepare = dsi_pll_28nm_vco_unprepare,
572 	.is_enabled = dsi_pll_28nm_clk_is_enabled,
573 };
574 
575 /*
576  * PLL Callbacks
577  */
578 
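/*
 * Cache the POSTDIV1/POSTDIV3 dividers, the byte mux selection (held in
 * VREG_CFG) and the current VCO rate, so that dsi_28nm_pll_restore_state()
 * can reprogram them once the PHY is brought back up.
 */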
579 static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy)
580 {
581 	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
582 	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
583 	void __iomem *base = pll_28nm->phy->pll_base;
584 
585 	cached_state->postdiv3 =
586 			readl(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
587 	cached_state->postdiv1 =
588 			readl(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
589 	cached_state->byte_mux = readl(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
590 	if (dsi_pll_28nm_clk_is_enabled(phy->vco_hw))
591 		cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
592 	else
593 		cached_state->vco_rate = 0;
594 }
595 
596 static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
597 {
598 	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
599 	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
600 	void __iomem *base = pll_28nm->phy->pll_base;
601 	int ret;
602 
603 	ret = dsi_pll_28nm_clk_set_rate(phy->vco_hw,
604 					cached_state->vco_rate, 0);
605 	if (ret) {
606 		DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
607 			"restore vco rate failed. ret=%d\n", ret);
608 		return ret;
609 	}
610 
611 	writel(cached_state->postdiv3, base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
612 	writel(cached_state->postdiv1, base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
613 	writel(cached_state->byte_mux, base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
614 
615 	return 0;
616 }
617 
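/*
 * Register the clock tree shown in the diagram at the top of this file:
 * the VCO, the analog postdiv (POSTDIV1), the fixed /2 indirect path, the
 * byte mux (a bit in VREG_CFG), the fixed /4 byte clock and the
 * POSTDIV3-based pixel-path clock.
 */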
618 static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
619 {
620 	char clk_name[32];
621 	struct clk_init_data vco_init = {
622 		.parent_data = &(const struct clk_parent_data) {
623 			.fw_name = "ref", .name = "xo",
624 		},
625 		.num_parents = 1,
626 		.name = clk_name,
627 		.flags = CLK_IGNORE_UNUSED,
628 	};
629 	struct device *dev = &pll_28nm->phy->pdev->dev;
630 	struct clk_hw *hw, *analog_postdiv, *indirect_path_div2, *byte_mux;
631 	int ret;
632 
633 	DBG("%d", pll_28nm->phy->id);
634 
635 	if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
636 		vco_init.ops = &clk_ops_dsi_pll_28nm_vco_lp;
637 	else if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_8226)
638 		vco_init.ops = &clk_ops_dsi_pll_28nm_vco_8226;
639 	else
640 		vco_init.ops = &clk_ops_dsi_pll_28nm_vco_hpm;
641 
642 	snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
643 	pll_28nm->clk_hw.init = &vco_init;
644 	ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
645 	if (ret)
646 		return ret;
647 
648 	snprintf(clk_name, sizeof(clk_name), "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
649 	analog_postdiv = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
650 			&pll_28nm->clk_hw, CLK_SET_RATE_PARENT,
651 			pll_28nm->phy->pll_base +
652 				REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
653 			0, 4, 0, NULL);
654 	if (IS_ERR(analog_postdiv))
655 		return PTR_ERR(analog_postdiv);
656 
657 	snprintf(clk_name, sizeof(clk_name), "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
658 	indirect_path_div2 = devm_clk_hw_register_fixed_factor_parent_hw(dev,
659 			clk_name, analog_postdiv, CLK_SET_RATE_PARENT, 1, 2);
660 	if (IS_ERR(indirect_path_div2))
661 		return PTR_ERR(indirect_path_div2);
662 
663 	snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id);
664 	hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
665 			&pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
666 				REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
667 			0, 8, 0, NULL);
668 	if (IS_ERR(hw))
669 		return PTR_ERR(hw);
670 	provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
671 
672 	snprintf(clk_name, sizeof(clk_name), "dsi%dbyte_mux", pll_28nm->phy->id);
673 	byte_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
674 			((const struct clk_hw *[]){
675 				&pll_28nm->clk_hw,
676 				indirect_path_div2,
677 			}), 2, CLK_SET_RATE_PARENT, pll_28nm->phy->pll_base +
678 				REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
679 	if (IS_ERR(byte_mux))
680 		return PTR_ERR(byte_mux);
681 
682 	snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id);
683 	hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
684 			byte_mux, CLK_SET_RATE_PARENT, 1, 4);
685 	if (IS_ERR(hw))
686 		return PTR_ERR(hw);
687 	provided_clocks[DSI_BYTE_PLL_CLK] = hw;
688 
689 	return 0;
690 }
691 
692 static int dsi_pll_28nm_init(struct msm_dsi_phy *phy)
693 {
694 	struct platform_device *pdev = phy->pdev;
695 	struct dsi_pll_28nm *pll_28nm;
696 	int ret;
697 
698 	if (!pdev)
699 		return -ENODEV;
700 
701 	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
702 	if (!pll_28nm)
703 		return -ENOMEM;
704 
705 	pll_28nm->phy = phy;
706 
707 	ret = pll_28nm_register(pll_28nm, phy->provided_clocks->hws);
708 	if (ret) {
709 		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
710 		return ret;
711 	}
712 
713 	phy->vco_hw = &pll_28nm->clk_hw;
714 
715 	return 0;
716 }
717 
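/*
 * Program the D-PHY timing registers from the parameters computed by
 * msm_dsi_dphy_timing_calc(); clk_zero is 9 bits wide, with bit 8 carried
 * in TIMING_CTRL_3.
 */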
718 static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
719 		struct msm_dsi_dphy_timing *timing)
720 {
721 	void __iomem *base = phy->base;
722 
723 	writel(DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero),
724 	       base + REG_DSI_28nm_PHY_TIMING_CTRL_0);
725 	writel(DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail),
726 	       base + REG_DSI_28nm_PHY_TIMING_CTRL_1);
727 	writel(DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare),
728 	       base + REG_DSI_28nm_PHY_TIMING_CTRL_2);
729 	if (timing->clk_zero & BIT(8))
730 		writel(DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8,
731 		       base + REG_DSI_28nm_PHY_TIMING_CTRL_3);
732 	writel(DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit),
733 	       base + REG_DSI_28nm_PHY_TIMING_CTRL_4);
734 	writel(DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero),
735 	       base + REG_DSI_28nm_PHY_TIMING_CTRL_5);
736 	writel(DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare),
737 	       base + REG_DSI_28nm_PHY_TIMING_CTRL_6);
738 	writel(DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail),
739 	       base + REG_DSI_28nm_PHY_TIMING_CTRL_7);
740 	writel(DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst),
741 	       base + REG_DSI_28nm_PHY_TIMING_CTRL_8);
742 	writel(DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
743 	       DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure),
744 	       base + REG_DSI_28nm_PHY_TIMING_CTRL_9);
745 	writel(DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get),
746 	       base + REG_DSI_28nm_PHY_TIMING_CTRL_10);
747 	writel(DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0),
748 	       base + REG_DSI_28nm_PHY_TIMING_CTRL_11);
749 }
750 
751 static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy)
752 {
753 	void __iomem *base = phy->reg_base;
754 
755 	writel(0x0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0);
756 	writel(1, base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG);
757 	writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5);
758 	writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3);
759 	writel(0x3, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2);
760 	writel(0x9, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1);
761 	writel(0x7, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0);
762 	writel(0x20, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4);
763 	writel(0x00, phy->base + REG_DSI_28nm_PHY_LDO_CNTRL);
764 }
765 
766 static void dsi_28nm_phy_regulator_enable_ldo(struct msm_dsi_phy *phy)
767 {
768 	void __iomem *base = phy->reg_base;
769 
770 	writel(0x0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0);
771 	writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG);
772 	writel(0x7, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5);
773 	writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3);
774 	writel(0x1, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2);
775 	writel(0x1, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1);
776 	writel(0x20, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4);
777 
778 	if (phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
779 		writel(0x05, phy->base + REG_DSI_28nm_PHY_LDO_CNTRL);
780 	else
781 		writel(0x0d, phy->base + REG_DSI_28nm_PHY_LDO_CNTRL);
782 }
783 
784 static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
785 {
786 	if (!enable) {
787 		writel(0, phy->reg_base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG);
788 		return;
789 	}
790 
791 	if (phy->regulator_ldo_mode)
792 		dsi_28nm_phy_regulator_enable_ldo(phy);
793 	else
794 		dsi_28nm_phy_regulator_enable_dcdc(phy);
795 }
796 
797 static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
798 				struct msm_dsi_phy_clk_request *clk_req)
799 {
800 	struct msm_dsi_dphy_timing *timing = &phy->timing;
801 	int i;
802 	void __iomem *base = phy->base;
803 	u32 val;
804 
805 	DBG("");
806 
807 	if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
808 		DRM_DEV_ERROR(&phy->pdev->dev,
809 			      "%s: D-PHY timing calculation failed\n",
810 			      __func__);
811 		return -EINVAL;
812 	}
813 
814 	writel(0xff, base + REG_DSI_28nm_PHY_STRENGTH_0);
815 
816 	dsi_28nm_phy_regulator_ctrl(phy, true);
817 
818 	dsi_28nm_dphy_set_timing(phy, timing);
819 
820 	writel(0x00, base + REG_DSI_28nm_PHY_CTRL_1);
821 	writel(0x5f, base + REG_DSI_28nm_PHY_CTRL_0);
822 
823 	writel(0x6, base + REG_DSI_28nm_PHY_STRENGTH_1);
824 
825 	for (i = 0; i < 4; i++) {
826 		writel(0, base + REG_DSI_28nm_PHY_LN_CFG_0(i));
827 		writel(0, base + REG_DSI_28nm_PHY_LN_CFG_1(i));
828 		writel(0, base + REG_DSI_28nm_PHY_LN_CFG_2(i));
829 		writel(0, base + REG_DSI_28nm_PHY_LN_CFG_3(i));
830 		writel(0, base + REG_DSI_28nm_PHY_LN_CFG_4(i));
831 		writel(0, base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i));
832 		writel(0, base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i));
833 		writel(0x1, base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i));
834 		writel(0x97, base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i));
835 	}
836 
837 	writel(0, base + REG_DSI_28nm_PHY_LNCK_CFG_4);
838 	writel(0xc0, base + REG_DSI_28nm_PHY_LNCK_CFG_1);
839 	writel(0x1, base + REG_DSI_28nm_PHY_LNCK_TEST_STR0);
840 	writel(0xbb, base + REG_DSI_28nm_PHY_LNCK_TEST_STR1);
841 
842 	writel(0x5f, base + REG_DSI_28nm_PHY_CTRL_0);
843 
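	/*
	 * Select the HS bit clock source: the DSI_1 slave in a dual-DSI
	 * setup is expected to take its bit clock from the master PHY, so
	 * BITCLK_HS_SEL is cleared for it; otherwise the bit is set.
	 */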
844 	val = readl(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL);
845 	if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE)
846 		val &= ~DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
847 	else
848 		val |= DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
849 	writel(val, base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL);
850 
851 	return 0;
852 }
853 
854 static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
855 {
856 	writel(0, phy->base + REG_DSI_28nm_PHY_CTRL_0);
857 	dsi_28nm_phy_regulator_ctrl(phy, false);
858 
859 	/*
860 	 * Wait for the register writes to complete in order to
861 	 * ensure that the PHY is completely disabled.
862 	 */
863 	wmb();
864 }
865 
866 static const struct regulator_bulk_data dsi_phy_28nm_regulators[] = {
867 	{ .supply = "vddio", .init_load_uA = 100000 },
868 };
869 
870 const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
871 	.has_phy_regulator = true,
872 	.regulator_data = dsi_phy_28nm_regulators,
873 	.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
874 	.ops = {
875 		.enable = dsi_28nm_phy_enable,
876 		.disable = dsi_28nm_phy_disable,
877 		.pll_init = dsi_pll_28nm_init,
878 		.save_pll_state = dsi_28nm_pll_save_state,
879 		.restore_pll_state = dsi_28nm_pll_restore_state,
880 	},
881 	.min_pll_rate = VCO_MIN_RATE,
882 	.max_pll_rate = VCO_MAX_RATE,
883 	.io_start = { 0xfd922b00, 0xfd923100 },
884 	.num_dsi_phy = 2,
885 };
886 
887 const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
888 	.has_phy_regulator = true,
889 	.regulator_data = dsi_phy_28nm_regulators,
890 	.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
891 	.ops = {
892 		.enable = dsi_28nm_phy_enable,
893 		.disable = dsi_28nm_phy_disable,
894 		.pll_init = dsi_pll_28nm_init,
895 		.save_pll_state = dsi_28nm_pll_save_state,
896 		.restore_pll_state = dsi_28nm_pll_restore_state,
897 	},
898 	.min_pll_rate = VCO_MIN_RATE,
899 	.max_pll_rate = VCO_MAX_RATE,
900 	.io_start = { 0x1a94400, 0x1a96400 },
901 	.num_dsi_phy = 2,
902 };
903 
904 const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
905 	.has_phy_regulator = true,
906 	.regulator_data = dsi_phy_28nm_regulators,
907 	.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
908 	.ops = {
909 		.enable = dsi_28nm_phy_enable,
910 		.disable = dsi_28nm_phy_disable,
911 		.pll_init = dsi_pll_28nm_init,
912 		.save_pll_state = dsi_28nm_pll_save_state,
913 		.restore_pll_state = dsi_28nm_pll_restore_state,
914 	},
915 	.min_pll_rate = VCO_MIN_RATE,
916 	.max_pll_rate = VCO_MAX_RATE,
917 	.io_start = { 0x1a98500 },
918 	.num_dsi_phy = 1,
919 	.quirks = DSI_PHY_28NM_QUIRK_PHY_LP,
920 };
921 
922 const struct msm_dsi_phy_cfg dsi_phy_28nm_8226_cfgs = {
923 	.has_phy_regulator = true,
924 	.regulator_data = dsi_phy_28nm_regulators,
925 	.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
926 	.ops = {
927 		.enable = dsi_28nm_phy_enable,
928 		.disable = dsi_28nm_phy_disable,
929 		.pll_init = dsi_pll_28nm_init,
930 		.save_pll_state = dsi_28nm_pll_save_state,
931 		.restore_pll_state = dsi_28nm_pll_restore_state,
932 	},
933 	.min_pll_rate = VCO_MIN_RATE,
934 	.max_pll_rate = VCO_MAX_RATE,
935 	.io_start = { 0xfd922b00 },
936 	.num_dsi_phy = 1,
937 	.quirks = DSI_PHY_28NM_QUIRK_PHY_8226,
938 };
939 
940 const struct msm_dsi_phy_cfg dsi_phy_28nm_8937_cfgs = {
941 	.has_phy_regulator = true,
942 	.regulator_data = dsi_phy_28nm_regulators,
943 	.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
944 	.ops = {
945 		.enable = dsi_28nm_phy_enable,
946 		.disable = dsi_28nm_phy_disable,
947 		.pll_init = dsi_pll_28nm_init,
948 		.save_pll_state = dsi_28nm_pll_save_state,
949 		.restore_pll_state = dsi_28nm_pll_restore_state,
950 	},
951 	.min_pll_rate = VCO_MIN_RATE,
952 	.max_pll_rate = VCO_MAX_RATE,
953 	.io_start = { 0x1a94400, 0x1a96400 },
954 	.num_dsi_phy = 2,
955 	.quirks = DSI_PHY_28NM_QUIRK_PHY_LP,
956 };
957