// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>

#include "dsi_phy.h"
#include "dsi.xml.h"
#include "dsi_phy_28nm.xml.h"

/*
 * DSI PLL 28nm - clock diagram (eg: DSI0):
 *
 *         dsi0analog_postdiv_clk
 *                             |         dsi0indirect_path_div2_clk
 *                             |          |
 *                   +------+  |  +----+  |  |\   dsi0byte_mux
 *  dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \   |
 *                |  +------+     +----+     | m|  |  +----+
 *                |                          | u|--o--| /4 |-- dsi0pllbyte
 *                |                          | x|     +----+
 *                o--------------------------| /
 *                |                          |/
 *                |          +------+
 *                o----------| DIV3 |------------------------- dsi0pll
 *                           +------+
 */

#define POLL_MAX_READS			10
#define POLL_TIMEOUT_US		50

#define VCO_REF_CLK_RATE		19200000
#define VCO_MIN_RATE			350000000
#define VCO_MAX_RATE			750000000

/* v2.0.0 28nm LP implementation */
#define DSI_PHY_28NM_QUIRK_PHY_LP	BIT(0)
#define DSI_PHY_28NM_QUIRK_PHY_8226	BIT(1)

#define LPFR_LUT_SIZE			10
struct lpfr_cfg {
	unsigned long vco_rate;
	u32 resistance;
};

/* Loop filter resistance: */
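/*
 * Entries are sorted by ascending vco_rate; set_rate uses the first
 * entry whose vco_rate is at or above the requested VCO rate.
 */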
static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
	{ 479500000,  8 },
	{ 480000000, 11 },
	{ 575500000,  8 },
	{ 576000000, 12 },
	{ 610500000,  8 },
	{ 659500000,  9 },
	{ 671500000, 10 },
	{ 672000000, 14 },
	{ 708500000, 10 },
	{ 750000000, 11 },
};

struct pll_28nm_cached_state {
	unsigned long vco_rate;
	u8 postdiv3;
	u8 postdiv1;
	u8 byte_mux;
};

struct dsi_pll_28nm {
	struct clk_hw clk_hw;

	struct msm_dsi_phy *phy;

	struct pll_28nm_cached_state cached_state;
};

#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, clk_hw)

static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
				u32 nb_tries, u32 timeout_us)
{
	bool pll_locked = false;
	u32 val;

	while (nb_tries--) {
		val = readl(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_STATUS);
		pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);

		if (pll_locked)
			break;

		udelay(timeout_us);
	}
	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");

	return pll_locked;
}

static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
{
	void __iomem *base = pll_28nm->phy->pll_base;

	/*
	 * Add HW recommended delays after toggling the software
	 * reset bit on and back off.
	 */
	writel(DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, base + REG_DSI_28nm_PHY_PLL_TEST_CFG);
	udelay(1);
	writel(0, base + REG_DSI_28nm_PHY_PLL_TEST_CFG);
	udelay(1);
}

/*
 * Clock Callbacks
 */
static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	struct device *dev = &pll_28nm->phy->pdev->dev;
	void __iomem *base = pll_28nm->phy->pll_base;
	unsigned long div_fbx1000, gen_vco_clk;
	u32 refclk_cfg, frac_n_mode, frac_n_value;
	u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
	u32 cal_cfg10, cal_cfg11;
	u32 rem;
	int i;

	VERB("rate=%lu, parent's=%lu", rate, parent_rate);

	/* Force postdiv2 to be div-4 */
	writel(3, base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG);

	/* Configure the Loop filter resistance */
	for (i = 0; i < LPFR_LUT_SIZE; i++)
		if (rate <= lpfr_lut[i].vco_rate)
			break;
	if (i == LPFR_LUT_SIZE) {
		DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
				rate);
		return -EINVAL;
	}
	writel(lpfr_lut[i].resistance, base + REG_DSI_28nm_PHY_PLL_LPFR_CFG);

	/* Loop filter capacitance values: c1 and c2 */
	writel(0x70, base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG);
	writel(0x15, base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG);

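	/*
	 * Compute the feedback divider scaled by 1000. When the target rate
	 * is not an integer multiple of the 19.2 MHz reference, the reference
	 * doubler is enabled and the PLL is programmed in fractional-N mode.
	 */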
	rem = rate % VCO_REF_CLK_RATE;
	if (rem) {
		refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
		frac_n_mode = 1;
		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
	} else {
		refclk_cfg = 0x0;
		frac_n_mode = 0;
		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
	}

	DBG("refclk_cfg = %d", refclk_cfg);

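	/* Fractional part of the feedback divider as a 16-bit SDM seed */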
	rem = div_fbx1000 % 1000;
	frac_n_value = (rem << 16) / 1000;

	DBG("div_fb = %lu", div_fbx1000);
	DBG("frac_n_value = %d", frac_n_value);

	DBG("Generated VCO Clock: %lu", gen_vco_clk);
	rem = 0;
	sdm_cfg1 = readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
	sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
	if (frac_n_mode) {
		sdm_cfg0 = 0x0;
		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
		sdm_cfg3 = frac_n_value >> 8;
		sdm_cfg2 = frac_n_value & 0xff;
	} else {
		sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
		sdm_cfg2 = 0;
		sdm_cfg3 = 0;
	}

	DBG("sdm_cfg0=%d", sdm_cfg0);
	DBG("sdm_cfg1=%d", sdm_cfg1);
	DBG("sdm_cfg2=%d", sdm_cfg2);
	DBG("sdm_cfg3=%d", sdm_cfg3);

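	/* CAL_CFG11:CAL_CFG10 hold the target VCO rate in MHz (high/low byte) */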
	cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
	cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
	DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);

	writel(0x02, base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG);
	writel(0x2b, base + REG_DSI_28nm_PHY_PLL_CAL_CFG3);
	writel(0x06, base + REG_DSI_28nm_PHY_PLL_CAL_CFG4);
	writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);

	writel(sdm_cfg1, base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
	writel(DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2),
	       base + REG_DSI_28nm_PHY_PLL_SDM_CFG2);
	writel(DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3),
	       base + REG_DSI_28nm_PHY_PLL_SDM_CFG3);
	writel(0, base + REG_DSI_28nm_PHY_PLL_SDM_CFG4);

	/* Add hardware recommended delay for correct PLL configuration */
	if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
		udelay(1000);
	else
		udelay(1);

	writel(refclk_cfg, base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG);
	writel(0x00, base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG);
	writel(0x31, base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG);
	writel(sdm_cfg0, base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
	writel(0x12, base + REG_DSI_28nm_PHY_PLL_CAL_CFG0);
	writel(0x30, base + REG_DSI_28nm_PHY_PLL_CAL_CFG6);
	writel(0x00, base + REG_DSI_28nm_PHY_PLL_CAL_CFG7);
	writel(0x60, base + REG_DSI_28nm_PHY_PLL_CAL_CFG8);
	writel(0x00, base + REG_DSI_28nm_PHY_PLL_CAL_CFG9);
	writel(cal_cfg10 & 0xff, base + REG_DSI_28nm_PHY_PLL_CAL_CFG10);
	writel(cal_cfg11 & 0xff, base + REG_DSI_28nm_PHY_PLL_CAL_CFG11);
	writel(0x20, base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG);

	return 0;
}

static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);

	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
					POLL_TIMEOUT_US);
}

static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	void __iomem *base = pll_28nm->phy->pll_base;
	u32 sdm0, doubler, sdm_byp_div;
	u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
	u32 ref_clk = VCO_REF_CLK_RATE;
	unsigned long vco_rate;

	VERB("parent_rate=%lu", parent_rate);

	/* Check to see if the ref clk doubler is enabled */
	doubler = readl(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
			DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
	ref_clk += (doubler * VCO_REF_CLK_RATE);

	/* see if it is integer mode or sdm mode */
	sdm0 = readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
	if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
		/* integer mode */
		sdm_byp_div = FIELD(
				readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
				DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
		vco_rate = ref_clk * sdm_byp_div;
	} else {
		/* sdm mode */
		sdm_dc_off = FIELD(
				readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
				DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
		DBG("sdm_dc_off = %d", sdm_dc_off);
		sdm2 = FIELD(readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
				DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
		sdm3 = FIELD(readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
				DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
		sdm_freq_seed = (sdm3 << 8) | sdm2;
		DBG("sdm_freq_seed = %d", sdm_freq_seed);

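		/* vco = ref * (DC_OFFSET + 1) + ref * FREQ_SEED / 2^16 */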
		vco_rate = (ref_clk * (sdm_dc_off + 1)) +
			mult_frac(ref_clk, sdm_freq_seed, BIT(16));
		DBG("vco rate = %lu", vco_rate);
	}

	DBG("returning vco rate = %lu", vco_rate);

	return vco_rate;
}

static int _dsi_pll_28nm_vco_prepare_hpm(struct dsi_pll_28nm *pll_28nm)
{
	struct device *dev = &pll_28nm->phy->pdev->dev;
	void __iomem *base = pll_28nm->phy->pll_base;
	u32 max_reads = 5, timeout_us = 100;
	bool locked;
	u32 val;
	int i;

	DBG("id=%d", pll_28nm->phy->id);

	pll_28nm_software_reset(pll_28nm);

	/*
	 * PLL power up sequence.
	 * Add necessary delays recommended by hardware.
	 */
	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
	udelay(1);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
	udelay(200);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
	udelay(500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
	udelay(600);

	for (i = 0; i < 2; i++) {
		/* DSI Uniphy lock detect setting */
		writel(0x0c, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
		udelay(100);
		writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);

		/* poll for PLL ready status */
		locked = pll_28nm_poll_for_ready(pll_28nm, max_reads,
						 timeout_us);
		if (locked)
			break;

		pll_28nm_software_reset(pll_28nm);

		/*
		 * PLL power up sequence.
		 * Add necessary delays recommended by hardware.
		 */
		val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
		writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
		udelay(1);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
		writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
		udelay(200);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
		udelay(250);

		val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
		udelay(200);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
		udelay(500);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
		writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
		udelay(600);
	}

	if (unlikely(!locked))
		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
	else
		DBG("DSI PLL Lock success");

	return locked ? 0 : -EINVAL;
}

static int dsi_pll_28nm_vco_prepare_hpm(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	int i, ret;

	if (unlikely(pll_28nm->phy->pll_on))
		return 0;

	for (i = 0; i < 3; i++) {
		ret = _dsi_pll_28nm_vco_prepare_hpm(pll_28nm);
		if (!ret) {
			pll_28nm->phy->pll_on = true;
			return 0;
		}
	}

	return ret;
}

static int dsi_pll_28nm_vco_prepare_8226(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	struct device *dev = &pll_28nm->phy->pdev->dev;
	void __iomem *base = pll_28nm->phy->pll_base;
	u32 max_reads = 5, timeout_us = 100;
	bool locked;
	u32 val;
	int i;

	DBG("id=%d", pll_28nm->phy->id);

	pll_28nm_software_reset(pll_28nm);

	/*
	 * PLL power up sequence.
	 * Add necessary delays recommended by hardware.
	 */
	writel(0x34, base + REG_DSI_28nm_PHY_PLL_CAL_CFG1);

	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
	udelay(200);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
	udelay(200);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
	udelay(600);

	for (i = 0; i < 7; i++) {
		/* DSI Uniphy lock detect setting */
		writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
		writel(0x0c, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
		udelay(100);
		writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);

		/* poll for PLL ready status */
		locked = pll_28nm_poll_for_ready(pll_28nm,
						max_reads, timeout_us);
		if (locked)
			break;

		pll_28nm_software_reset(pll_28nm);

		/*
		 * PLL power up sequence.
		 * Add necessary delays recommended by hardware.
		 */
		writel(0x00, base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG);
		udelay(50);

		val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
		writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
		udelay(100);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
		writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
		udelay(600);
	}

	if (unlikely(!locked))
		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
	else
		DBG("DSI PLL Lock success");

	return locked ? 0 : -EINVAL;
}

static int dsi_pll_28nm_vco_prepare_lp(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	struct device *dev = &pll_28nm->phy->pdev->dev;
	void __iomem *base = pll_28nm->phy->pll_base;
	bool locked;
	u32 max_reads = 10, timeout_us = 50;
	u32 val;

	DBG("id=%d", pll_28nm->phy->id);

	if (unlikely(pll_28nm->phy->pll_on))
		return 0;

	pll_28nm_software_reset(pll_28nm);

	/*
	 * PLL power up sequence.
	 * Add necessary delays recommended by hardware.
	 */
	writel(0x34, base + REG_DSI_28nm_PHY_PLL_CAL_CFG1);
	ndelay(500);

	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
	ndelay(500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
	ndelay(500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
		DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
	writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
	ndelay(500);

	/* DSI PLL toggle lock detect setting */
	writel(0x04, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
	ndelay(500);
	writel(0x05, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
	udelay(512);

	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);

	if (unlikely(!locked)) {
		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
		return -EINVAL;
	}

	DBG("DSI PLL lock success");
	pll_28nm->phy->pll_on = true;

	return 0;
}

static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);

	DBG("id=%d", pll_28nm->phy->id);

	if (unlikely(!pll_28nm->phy->pll_on))
		return;

	writel(0, pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_GLB_CFG);

	pll_28nm->phy->pll_on = false;
}

static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
		unsigned long rate, unsigned long *parent_rate)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);

	if      (rate < pll_28nm->phy->cfg->min_pll_rate)
		return  pll_28nm->phy->cfg->min_pll_rate;
	else if (rate > pll_28nm->phy->cfg->max_pll_rate)
		return  pll_28nm->phy->cfg->max_pll_rate;
	else
		return rate;
}

static const struct clk_ops clk_ops_dsi_pll_28nm_vco_hpm = {
	.round_rate = dsi_pll_28nm_clk_round_rate,
	.set_rate = dsi_pll_28nm_clk_set_rate,
	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
	.prepare = dsi_pll_28nm_vco_prepare_hpm,
	.unprepare = dsi_pll_28nm_vco_unprepare,
	.is_enabled = dsi_pll_28nm_clk_is_enabled,
};

static const struct clk_ops clk_ops_dsi_pll_28nm_vco_lp = {
	.round_rate = dsi_pll_28nm_clk_round_rate,
	.set_rate = dsi_pll_28nm_clk_set_rate,
	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
	.prepare = dsi_pll_28nm_vco_prepare_lp,
	.unprepare = dsi_pll_28nm_vco_unprepare,
	.is_enabled = dsi_pll_28nm_clk_is_enabled,
};

static const struct clk_ops clk_ops_dsi_pll_28nm_vco_8226 = {
	.round_rate = dsi_pll_28nm_clk_round_rate,
	.set_rate = dsi_pll_28nm_clk_set_rate,
	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
	.prepare = dsi_pll_28nm_vco_prepare_8226,
	.unprepare = dsi_pll_28nm_vco_unprepare,
	.is_enabled = dsi_pll_28nm_clk_is_enabled,
};

/*
 * PLL Callbacks
 */

static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
	void __iomem *base = pll_28nm->phy->pll_base;

	cached_state->postdiv3 =
			readl(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
	cached_state->postdiv1 =
			readl(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
	cached_state->byte_mux = readl(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
	if (dsi_pll_28nm_clk_is_enabled(phy->vco_hw))
		cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
	else
		cached_state->vco_rate = 0;
}

static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
	void __iomem *base = pll_28nm->phy->pll_base;
	int ret;

	ret = dsi_pll_28nm_clk_set_rate(phy->vco_hw,
					cached_state->vco_rate, 0);
	if (ret) {
		DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
			"restore vco rate failed. ret=%d\n", ret);
		return ret;
	}

	writel(cached_state->postdiv3, base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
	writel(cached_state->postdiv1, base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
	writel(cached_state->byte_mux, base + REG_DSI_28nm_PHY_PLL_VREG_CFG);

	return 0;
}

static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
{
	char clk_name[32];
	struct clk_init_data vco_init = {
		.parent_data = &(const struct clk_parent_data) {
			.fw_name = "ref", .name = "xo",
		},
		.num_parents = 1,
		.name = clk_name,
		.flags = CLK_IGNORE_UNUSED,
	};
	struct device *dev = &pll_28nm->phy->pdev->dev;
	struct clk_hw *hw, *analog_postdiv, *indirect_path_div2, *byte_mux;
	int ret;

	DBG("%d", pll_28nm->phy->id);

	if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
		vco_init.ops = &clk_ops_dsi_pll_28nm_vco_lp;
	else if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_8226)
		vco_init.ops = &clk_ops_dsi_pll_28nm_vco_8226;
	else
		vco_init.ops = &clk_ops_dsi_pll_28nm_vco_hpm;

	snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
	pll_28nm->clk_hw.init = &vco_init;
	ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
	if (ret)
		return ret;

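	/*
	 * Register the post-dividers, the /2 indirect path and the byte mux
	 * that make up the clock tree shown in the diagram at the top of
	 * this file.
	 */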
	snprintf(clk_name, sizeof(clk_name), "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
	analog_postdiv = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
			&pll_28nm->clk_hw, CLK_SET_RATE_PARENT,
			pll_28nm->phy->pll_base +
				REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
			0, 4, 0, NULL);
	if (IS_ERR(analog_postdiv))
		return PTR_ERR(analog_postdiv);

	snprintf(clk_name, sizeof(clk_name), "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
	indirect_path_div2 = devm_clk_hw_register_fixed_factor_parent_hw(dev,
			clk_name, analog_postdiv, CLK_SET_RATE_PARENT, 1, 2);
	if (IS_ERR(indirect_path_div2))
		return PTR_ERR(indirect_path_div2);

	snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id);
	hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
			&pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
				REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
			0, 8, 0, NULL);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	provided_clocks[DSI_PIXEL_PLL_CLK] = hw;

	snprintf(clk_name, sizeof(clk_name), "dsi%dbyte_mux", pll_28nm->phy->id);
	byte_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
			((const struct clk_hw *[]){
				&pll_28nm->clk_hw,
				indirect_path_div2,
			}), 2, CLK_SET_RATE_PARENT, pll_28nm->phy->pll_base +
				REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
	if (IS_ERR(byte_mux))
		return PTR_ERR(byte_mux);

	snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id);
	hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
			byte_mux, CLK_SET_RATE_PARENT, 1, 4);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	provided_clocks[DSI_BYTE_PLL_CLK] = hw;

	return 0;
}

static int dsi_pll_28nm_init(struct msm_dsi_phy *phy)
{
	struct platform_device *pdev = phy->pdev;
	struct dsi_pll_28nm *pll_28nm;
	int ret;

	if (!pdev)
		return -ENODEV;

	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
	if (!pll_28nm)
		return -ENOMEM;

	pll_28nm->phy = phy;

	ret = pll_28nm_register(pll_28nm, phy->provided_clocks->hws);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
		return ret;
	}

	phy->vco_hw = &pll_28nm->clk_hw;

	return 0;
}

static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
		struct msm_dsi_dphy_timing *timing)
{
	void __iomem *base = phy->base;

	writel(DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero),
	       base + REG_DSI_28nm_PHY_TIMING_CTRL_0);
	writel(DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail),
	       base + REG_DSI_28nm_PHY_TIMING_CTRL_1);
	writel(DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare),
	       base + REG_DSI_28nm_PHY_TIMING_CTRL_2);
	if (timing->clk_zero & BIT(8))
		writel(DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8,
		       base + REG_DSI_28nm_PHY_TIMING_CTRL_3);
	writel(DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit),
	       base + REG_DSI_28nm_PHY_TIMING_CTRL_4);
	writel(DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero),
	       base + REG_DSI_28nm_PHY_TIMING_CTRL_5);
	writel(DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare),
	       base + REG_DSI_28nm_PHY_TIMING_CTRL_6);
	writel(DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail),
	       base + REG_DSI_28nm_PHY_TIMING_CTRL_7);
	writel(DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst),
	       base + REG_DSI_28nm_PHY_TIMING_CTRL_8);
	writel(DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
	       DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure),
	       base + REG_DSI_28nm_PHY_TIMING_CTRL_9);
	writel(DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get),
	       base + REG_DSI_28nm_PHY_TIMING_CTRL_10);
	writel(DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0),
	       base + REG_DSI_28nm_PHY_TIMING_CTRL_11);
}

static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->reg_base;

	writel(0x0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0);
	writel(1, base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG);
	writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5);
	writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3);
	writel(0x3, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2);
	writel(0x9, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1);
	writel(0x7, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0);
	writel(0x20, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4);
	writel(0x00, phy->base + REG_DSI_28nm_PHY_LDO_CNTRL);
}

static void dsi_28nm_phy_regulator_enable_ldo(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->reg_base;

	writel(0x0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0);
	writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG);
	writel(0x7, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5);
	writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3);
	writel(0x1, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2);
	writel(0x1, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1);
	writel(0x20, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4);

	if (phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
		writel(0x05, phy->base + REG_DSI_28nm_PHY_LDO_CNTRL);
	else
		writel(0x0d, phy->base + REG_DSI_28nm_PHY_LDO_CNTRL);
}

static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
{
	if (!enable) {
		writel(0, phy->reg_base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG);
		return;
	}

	if (phy->regulator_ldo_mode)
		dsi_28nm_phy_regulator_enable_ldo(phy);
	else
		dsi_28nm_phy_regulator_enable_dcdc(phy);
}

static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
				struct msm_dsi_phy_clk_request *clk_req)
{
	struct msm_dsi_dphy_timing *timing = &phy->timing;
	int i;
	void __iomem *base = phy->base;
	u32 val;

	DBG("");

	if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
		DRM_DEV_ERROR(&phy->pdev->dev,
			      "%s: D-PHY timing calculation failed\n",
			      __func__);
		return -EINVAL;
	}

	writel(0xff, base + REG_DSI_28nm_PHY_STRENGTH_0);

	dsi_28nm_phy_regulator_ctrl(phy, true);

	dsi_28nm_dphy_set_timing(phy, timing);

	writel(0x00, base + REG_DSI_28nm_PHY_CTRL_1);
	writel(0x5f, base + REG_DSI_28nm_PHY_CTRL_0);

	writel(0x6, base + REG_DSI_28nm_PHY_STRENGTH_1);

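	/* Per-lane (data lanes 0-3) setup */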
	for (i = 0; i < 4; i++) {
		writel(0, base + REG_DSI_28nm_PHY_LN_CFG_0(i));
		writel(0, base + REG_DSI_28nm_PHY_LN_CFG_1(i));
		writel(0, base + REG_DSI_28nm_PHY_LN_CFG_2(i));
		writel(0, base + REG_DSI_28nm_PHY_LN_CFG_3(i));
		writel(0, base + REG_DSI_28nm_PHY_LN_CFG_4(i));
		writel(0, base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i));
		writel(0, base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i));
		writel(0x1, base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i));
		writel(0x97, base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i));
	}

	writel(0, base + REG_DSI_28nm_PHY_LNCK_CFG_4);
	writel(0xc0, base + REG_DSI_28nm_PHY_LNCK_CFG_1);
	writel(0x1, base + REG_DSI_28nm_PHY_LNCK_TEST_STR0);
	writel(0xbb, base + REG_DSI_28nm_PHY_LNCK_TEST_STR1);

	writel(0x5f, base + REG_DSI_28nm_PHY_CTRL_0);

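	/*
	 * In a dual-DSI setup the DSI1 slave PHY runs with BITCLK_HS_SEL
	 * cleared; every other configuration sets it.
	 */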
	val = readl(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL);
	if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE)
		val &= ~DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
	else
		val |= DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
	writel(val, base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL);

	return 0;
}

static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
{
	writel(0, phy->base + REG_DSI_28nm_PHY_CTRL_0);
	dsi_28nm_phy_regulator_ctrl(phy, false);

	/*
	 * Wait for the register writes to complete in order to
	 * ensure that the PHY is completely disabled.
	 */
	wmb();
}

static const struct regulator_bulk_data dsi_phy_28nm_regulators[] = {
	{ .supply = "vddio", .init_load_uA = 100000 },
};

const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
	.has_phy_regulator = true,
	.regulator_data = dsi_phy_28nm_regulators,
	.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0xfd922b00, 0xfd923100 },
	.num_dsi_phy = 2,
};

const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
	.has_phy_regulator = true,
	.regulator_data = dsi_phy_28nm_regulators,
	.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0x1a94400, 0x1a96400 },
	.num_dsi_phy = 2,
};

const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
	.has_phy_regulator = true,
	.regulator_data = dsi_phy_28nm_regulators,
	.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0x1a98500 },
	.num_dsi_phy = 1,
	.quirks = DSI_PHY_28NM_QUIRK_PHY_LP,
};

const struct msm_dsi_phy_cfg dsi_phy_28nm_8226_cfgs = {
	.has_phy_regulator = true,
	.regulator_data = dsi_phy_28nm_regulators,
	.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0xfd922b00 },
	.num_dsi_phy = 1,
	.quirks = DSI_PHY_28NM_QUIRK_PHY_8226,
};

const struct msm_dsi_phy_cfg dsi_phy_28nm_8937_cfgs = {
	.has_phy_regulator = true,
	.regulator_data = dsi_phy_28nm_regulators,
	.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0x1a94400, 0x1a96400 },
	.num_dsi_phy = 2,
	.quirks = DSI_PHY_28NM_QUIRK_PHY_LP,
};
957