// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>

#include "dsi_phy.h"
#include "dsi.xml.h"
#include "dsi_phy_28nm.xml.h"

/*
 * DSI PLL 28nm - clock diagram (eg: DSI0):
 *
 *         dsi0analog_postdiv_clk
 *                             |         dsi0indirect_path_div2_clk
 *                             |          |
 *                   +------+  |  +----+  |  |\   dsi0byte_mux
 *  dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \   |
 *                |  +------+     +----+     | m|  |  +----+
 *                |                          | u|--o--| /4 |-- dsi0pllbyte
 *                |                          | x|     +----+
 *                o--------------------------| /
 *                |                          |/
 *                |          +------+
 *                o----------| DIV3 |------------------------- dsi0pll
 *                           +------+
 */

#define POLL_MAX_READS			10
#define POLL_TIMEOUT_US		50

#define VCO_REF_CLK_RATE		19200000
#define VCO_MIN_RATE			350000000
#define VCO_MAX_RATE			750000000

/* v2.0.0 28nm LP implementation */
#define DSI_PHY_28NM_QUIRK_PHY_LP	BIT(0)
#define DSI_PHY_28NM_QUIRK_PHY_8226	BIT(1)

#define LPFR_LUT_SIZE			10
struct lpfr_cfg {
	unsigned long vco_rate;
	u32 resistance;
};

/* Loop filter resistance: */
static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
	{ 479500000,  8 },
	{ 480000000, 11 },
	{ 575500000,  8 },
	{ 576000000, 12 },
	{ 610500000,  8 },
	{ 659500000,  9 },
	{ 671500000, 10 },
	{ 672000000, 14 },
	{ 708500000, 10 },
	{ 750000000, 11 },
};

struct pll_28nm_cached_state {
	unsigned long vco_rate;
	u8 postdiv3;
	u8 postdiv1;
	u8 byte_mux;
};

struct dsi_pll_28nm {
	struct clk_hw clk_hw;

	struct msm_dsi_phy *phy;

	struct pll_28nm_cached_state cached_state;
};

#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, clk_hw)

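/*
 * Poll the PLL status register up to @nb_tries times, waiting
 * @timeout_us between reads, and report whether the PLL_RDY bit
 * was seen.
 */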
static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
				u32 nb_tries, u32 timeout_us)
{
	bool pll_locked = false;
	u32 val;

	while (nb_tries--) {
		val = dsi_phy_read(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_STATUS);
		pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);

		if (pll_locked)
			break;

		udelay(timeout_us);
	}
	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");

	return pll_locked;
}

static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
{
	void __iomem *base = pll_28nm->phy->pll_base;

	/*
	 * Add HW recommended delays after toggling the software
	 * reset bit off and back on.
	 */
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
			     DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
}

/*
 * Clock Callbacks
 */
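/*
 * Program the VCO for the requested rate: pick the loop filter
 * resistance from the LUT, then derive the feedback divider (scaled
 * by 1000) and the SDM configuration. Rates that are not an integer
 * multiple of the 19.2 MHz reference use fractional-N mode with the
 * reference doubler enabled.
 */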
static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	struct device *dev = &pll_28nm->phy->pdev->dev;
	void __iomem *base = pll_28nm->phy->pll_base;
	unsigned long div_fbx1000, gen_vco_clk;
	u32 refclk_cfg, frac_n_mode, frac_n_value;
	u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
	u32 cal_cfg10, cal_cfg11;
	u32 rem;
	int i;

	VERB("rate=%lu, parent's=%lu", rate, parent_rate);

	/* Force postdiv2 to be div-4 */
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);

	/* Configure the Loop filter resistance */
	for (i = 0; i < LPFR_LUT_SIZE; i++)
		if (rate <= lpfr_lut[i].vco_rate)
			break;
	if (i == LPFR_LUT_SIZE) {
		DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
				rate);
		return -EINVAL;
	}
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);

	/* Loop filter capacitance values : c1 and c2 */
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);

	rem = rate % VCO_REF_CLK_RATE;
	if (rem) {
		refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
		frac_n_mode = 1;
		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
	} else {
		refclk_cfg = 0x0;
		frac_n_mode = 0;
		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
	}

	DBG("refclk_cfg = %d", refclk_cfg);

	rem = div_fbx1000 % 1000;
	frac_n_value = (rem << 16) / 1000;

	DBG("div_fb = %lu", div_fbx1000);
	DBG("frac_n_value = %d", frac_n_value);

	DBG("Generated VCO Clock: %lu", gen_vco_clk);
	rem = 0;
	sdm_cfg1 = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
	sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
	if (frac_n_mode) {
		sdm_cfg0 = 0x0;
		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
		sdm_cfg3 = frac_n_value >> 8;
		sdm_cfg2 = frac_n_value & 0xff;
	} else {
		sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
		sdm_cfg2 = 0;
		sdm_cfg3 = 0;
	}

	DBG("sdm_cfg0=%d", sdm_cfg0);
	DBG("sdm_cfg1=%d", sdm_cfg1);
	DBG("sdm_cfg2=%d", sdm_cfg2);
	DBG("sdm_cfg3=%d", sdm_cfg3);

	cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
	cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
	DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);

	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3,    0x2b);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4,    0x06);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,  0x0d);

	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
		      DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
		      DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);

	/* Add hardware recommended delay for correct PLL configuration */
	if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
		udelay(1000);
	else
		udelay(1);

	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0,   sdm_cfg0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0,   0x12);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6,   0x30);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7,   0x00);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8,   0x60);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9,   0x00);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10,  cal_cfg10 & 0xff);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11,  cal_cfg11 & 0xff);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG,  0x20);

	return 0;
}

static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);

	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
					POLL_TIMEOUT_US);
}

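/*
 * Read the VCO rate back from the hardware: in bypass (integer) mode
 * it is ref_clk * (BYP_DIV + 1); in SDM mode it is
 * ref_clk * (DC_OFFSET + 1) plus the fractional contribution of the
 * 16-bit frequency seed.
 */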
static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	void __iomem *base = pll_28nm->phy->pll_base;
	u32 sdm0, doubler, sdm_byp_div;
	u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
	u32 ref_clk = VCO_REF_CLK_RATE;
	unsigned long vco_rate;

	VERB("parent_rate=%lu", parent_rate);

	/* Check to see if the ref clk doubler is enabled */
	doubler = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
			DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
	ref_clk += (doubler * VCO_REF_CLK_RATE);

	/* see if it is integer mode or sdm mode */
	sdm0 = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
	if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
		/* integer mode */
		sdm_byp_div = FIELD(
				dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
				DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
		vco_rate = ref_clk * sdm_byp_div;
	} else {
		/* sdm mode */
		sdm_dc_off = FIELD(
				dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
				DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
		DBG("sdm_dc_off = %d", sdm_dc_off);
		sdm2 = FIELD(dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
				DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
		sdm3 = FIELD(dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
				DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
		sdm_freq_seed = (sdm3 << 8) | sdm2;
		DBG("sdm_freq_seed = %d", sdm_freq_seed);

		vco_rate = (ref_clk * (sdm_dc_off + 1)) +
			mult_frac(ref_clk, sdm_freq_seed, BIT(16));
		DBG("vco rate = %lu", vco_rate);
	}

	DBG("returning vco rate = %lu", vco_rate);

	return vco_rate;
}

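/*
 * Power up and lock the PLL for the HPM PHY. If the first power-up
 * sequence does not lock, it is retried once with an extra LDO
 * power-down toggle.
 */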
static int _dsi_pll_28nm_vco_prepare_hpm(struct dsi_pll_28nm *pll_28nm)
{
	struct device *dev = &pll_28nm->phy->pdev->dev;
	void __iomem *base = pll_28nm->phy->pll_base;
	u32 max_reads = 5, timeout_us = 100;
	bool locked;
	u32 val;
	int i;

	DBG("id=%d", pll_28nm->phy->id);

	pll_28nm_software_reset(pll_28nm);

	/*
	 * PLL power up sequence.
	 * Add necessary delays recommended by hardware.
	 */
	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);

	for (i = 0; i < 2; i++) {
		/* DSI Uniphy lock detect setting */
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
				     0x0c, 100);
		dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);

		/* poll for PLL ready status */
		locked = pll_28nm_poll_for_ready(pll_28nm, max_reads,
						 timeout_us);
		if (locked)
			break;

		pll_28nm_software_reset(pll_28nm);

		/*
		 * PLL power up sequence.
		 * Add necessary delays recommended by hardware.
		 */
		val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);

		val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
	}

	if (unlikely(!locked))
		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
	else
		DBG("DSI PLL Lock success");

	return locked ? 0 : -EINVAL;
}

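/*
 * Prepare callback for the HPM PHY: retry the full power-up and lock
 * sequence up to three times before giving up.
 */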
static int dsi_pll_28nm_vco_prepare_hpm(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	int i, ret;

	if (unlikely(pll_28nm->phy->pll_on))
		return 0;

	for (i = 0; i < 3; i++) {
		ret = _dsi_pll_28nm_vco_prepare_hpm(pll_28nm);
		if (!ret) {
			pll_28nm->phy->pll_on = true;
			return 0;
		}
	}

	return ret;
}

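/*
 * Prepare callback for the 8226 PHY: power up the PLL and retry the
 * lock-detect sequence up to seven times until the PLL locks.
 */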
static int dsi_pll_28nm_vco_prepare_8226(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	struct device *dev = &pll_28nm->phy->pdev->dev;
	void __iomem *base = pll_28nm->phy->pll_base;
	u32 max_reads = 5, timeout_us = 100;
	bool locked;
	u32 val;
	int i;

	DBG("id=%d", pll_28nm->phy->id);

	pll_28nm_software_reset(pll_28nm);

	/*
	 * PLL power up sequence.
	 * Add necessary delays recommended by hardware.
	 */
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34);

	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);

	for (i = 0; i < 7; i++) {
		/* DSI Uniphy lock detect setting */
		dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
				0x0c, 100);
		dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);

		/* poll for PLL ready status */
		locked = pll_28nm_poll_for_ready(pll_28nm,
						max_reads, timeout_us);
		if (locked)
			break;

		pll_28nm_software_reset(pll_28nm);

		/*
		 * PLL power up sequence.
		 * Add necessary delays recommended by hardware.
		 */
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00, 50);

		val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 100);

		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
		dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
	}

	if (unlikely(!locked))
		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
	else
		DBG("DSI PLL Lock success");

	return locked ? 0 : -EINVAL;
}

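/*
 * Prepare callback for the LP PHY: a single power-up sequence with
 * nanosecond-scale delays and no retry loop.
 */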
static int dsi_pll_28nm_vco_prepare_lp(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
	struct device *dev = &pll_28nm->phy->pdev->dev;
	void __iomem *base = pll_28nm->phy->pll_base;
	bool locked;
	u32 max_reads = 10, timeout_us = 50;
	u32 val;

	DBG("id=%d", pll_28nm->phy->id);

	if (unlikely(pll_28nm->phy->pll_on))
		return 0;

	pll_28nm_software_reset(pll_28nm);

	/*
	 * PLL power up sequence.
	 * Add necessary delays recommended by hardware.
	 */
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);

	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
		DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

	/* DSI PLL toggle lock detect setting */
	dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);
	dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);

	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);

	if (unlikely(!locked)) {
		DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
		return -EINVAL;
	}

	DBG("DSI PLL lock success");
	pll_28nm->phy->pll_on = true;

	return 0;
}

static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);

	DBG("id=%d", pll_28nm->phy->id);

	if (unlikely(!pll_28nm->phy->pll_on))
		return;

	dsi_phy_write(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);

	pll_28nm->phy->pll_on = false;
}

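/* Clamp the requested rate to the PLL range supported by this PHY. */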
static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
		unsigned long rate, unsigned long *parent_rate)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);

	if      (rate < pll_28nm->phy->cfg->min_pll_rate)
		return  pll_28nm->phy->cfg->min_pll_rate;
	else if (rate > pll_28nm->phy->cfg->max_pll_rate)
		return  pll_28nm->phy->cfg->max_pll_rate;
	else
		return rate;
}

static const struct clk_ops clk_ops_dsi_pll_28nm_vco_hpm = {
	.round_rate = dsi_pll_28nm_clk_round_rate,
	.set_rate = dsi_pll_28nm_clk_set_rate,
	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
	.prepare = dsi_pll_28nm_vco_prepare_hpm,
	.unprepare = dsi_pll_28nm_vco_unprepare,
	.is_enabled = dsi_pll_28nm_clk_is_enabled,
};

static const struct clk_ops clk_ops_dsi_pll_28nm_vco_lp = {
	.round_rate = dsi_pll_28nm_clk_round_rate,
	.set_rate = dsi_pll_28nm_clk_set_rate,
	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
	.prepare = dsi_pll_28nm_vco_prepare_lp,
	.unprepare = dsi_pll_28nm_vco_unprepare,
	.is_enabled = dsi_pll_28nm_clk_is_enabled,
};

static const struct clk_ops clk_ops_dsi_pll_28nm_vco_8226 = {
	.round_rate = dsi_pll_28nm_clk_round_rate,
	.set_rate = dsi_pll_28nm_clk_set_rate,
	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
	.prepare = dsi_pll_28nm_vco_prepare_8226,
	.unprepare = dsi_pll_28nm_vco_unprepare,
	.is_enabled = dsi_pll_28nm_clk_is_enabled,
};

/*
 * PLL Callbacks
 */

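/*
 * Cache the POSTDIV1/POSTDIV3 dividers, the byte mux selection and the
 * current VCO rate so they can be reprogrammed when the PLL state is
 * restored.
 */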
static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
	void __iomem *base = pll_28nm->phy->pll_base;

	cached_state->postdiv3 =
			dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
	cached_state->postdiv1 =
			dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
	cached_state->byte_mux = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
	if (dsi_pll_28nm_clk_is_enabled(phy->vco_hw))
		cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
	else
		cached_state->vco_rate = 0;
}

static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
	void __iomem *base = pll_28nm->phy->pll_base;
	int ret;

	ret = dsi_pll_28nm_clk_set_rate(phy->vco_hw,
					cached_state->vco_rate, 0);
	if (ret) {
		DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
			"restore vco rate failed. ret=%d\n", ret);
		return ret;
	}

	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
		      cached_state->postdiv3);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
		      cached_state->postdiv1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
		      cached_state->byte_mux);

	return 0;
}

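/*
 * Register the clock tree shown in the diagram at the top of this file:
 * the VCO feeds POSTDIV1 (analog postdiv) and a fixed /2 on the indirect
 * path, a mux selects between the VCO and the /2 output for the byte
 * clock (a further fixed /4), while POSTDIV3 produces the pixel PLL
 * clock.
 */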
static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
{
	char clk_name[32];
	struct clk_init_data vco_init = {
		.parent_data = &(const struct clk_parent_data) {
			.fw_name = "ref", .name = "xo",
		},
		.num_parents = 1,
		.name = clk_name,
		.flags = CLK_IGNORE_UNUSED,
	};
	struct device *dev = &pll_28nm->phy->pdev->dev;
	struct clk_hw *hw, *analog_postdiv, *indirect_path_div2, *byte_mux;
	int ret;

	DBG("%d", pll_28nm->phy->id);

	if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
		vco_init.ops = &clk_ops_dsi_pll_28nm_vco_lp;
	else if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_8226)
		vco_init.ops = &clk_ops_dsi_pll_28nm_vco_8226;
	else
		vco_init.ops = &clk_ops_dsi_pll_28nm_vco_hpm;

	snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
	pll_28nm->clk_hw.init = &vco_init;
	ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
	if (ret)
		return ret;

	snprintf(clk_name, sizeof(clk_name), "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
	analog_postdiv = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
			&pll_28nm->clk_hw, CLK_SET_RATE_PARENT,
			pll_28nm->phy->pll_base +
				REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
			0, 4, 0, NULL);
	if (IS_ERR(analog_postdiv))
		return PTR_ERR(analog_postdiv);

	snprintf(clk_name, sizeof(clk_name), "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
	indirect_path_div2 = devm_clk_hw_register_fixed_factor_parent_hw(dev,
			clk_name, analog_postdiv, CLK_SET_RATE_PARENT, 1, 2);
	if (IS_ERR(indirect_path_div2))
		return PTR_ERR(indirect_path_div2);

	snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id);
	hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
			&pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
				REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
			0, 8, 0, NULL);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	provided_clocks[DSI_PIXEL_PLL_CLK] = hw;

	snprintf(clk_name, sizeof(clk_name), "dsi%dbyte_mux", pll_28nm->phy->id);
	byte_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
			((const struct clk_hw *[]){
				&pll_28nm->clk_hw,
				indirect_path_div2,
			}), 2, CLK_SET_RATE_PARENT, pll_28nm->phy->pll_base +
				REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
	if (IS_ERR(byte_mux))
		return PTR_ERR(byte_mux);

	snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id);
	hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
			byte_mux, CLK_SET_RATE_PARENT, 1, 4);
	if (IS_ERR(hw))
		return PTR_ERR(hw);
	provided_clocks[DSI_BYTE_PLL_CLK] = hw;

	return 0;
}

static int dsi_pll_28nm_init(struct msm_dsi_phy *phy)
{
	struct platform_device *pdev = phy->pdev;
	struct dsi_pll_28nm *pll_28nm;
	int ret;

	if (!pdev)
		return -ENODEV;

	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
	if (!pll_28nm)
		return -ENOMEM;

	pll_28nm->phy = phy;

	ret = pll_28nm_register(pll_28nm, phy->provided_clocks->hws);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
		return ret;
	}

	phy->vco_hw = &pll_28nm->clk_hw;

	return 0;
}

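/* Program the precomputed D-PHY timing values into TIMING_CTRL_0..11. */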
static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
		struct msm_dsi_dphy_timing *timing)
{
	void __iomem *base = phy->base;

	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
		      DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
		      DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
		      DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
	if (timing->clk_zero & BIT(8))
		dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
			      DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
		      DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
		      DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
		      DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
		      DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
		      DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
		      DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
		      DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
		      DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
		      DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}

static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->reg_base;

	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
	dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
}

static void dsi_28nm_phy_regulator_enable_ldo(struct msm_dsi_phy *phy)
{
	void __iomem *base = phy->reg_base;

	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0x7);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);

	if (phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
		dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x05);
	else
		dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x0d);
}

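/*
 * Select the LDO or DC-DC regulator configuration, or clear
 * REGULATOR_CAL_PWR_CFG when disabling.
 */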
static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
{
	if (!enable) {
		dsi_phy_write(phy->reg_base +
			      REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
		return;
	}

	if (phy->regulator_ldo_mode)
		dsi_28nm_phy_regulator_enable_ldo(phy);
	else
		dsi_28nm_phy_regulator_enable_dcdc(phy);
}

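/*
 * Enable the PHY: calculate the D-PHY timings for the requested clock,
 * bring up the regulator block, program the timing and per-lane
 * registers, and select the bit clock source via GLBL_TEST_CTRL
 * (cleared on DSI1 when it acts as the slave PHY in dual-DSI mode).
 */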
static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
				struct msm_dsi_phy_clk_request *clk_req)
{
	struct msm_dsi_dphy_timing *timing = &phy->timing;
	int i;
	void __iomem *base = phy->base;
	u32 val;

	DBG("");

	if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
		DRM_DEV_ERROR(&phy->pdev->dev,
			      "%s: D-PHY timing calculation failed\n",
			      __func__);
		return -EINVAL;
	}

	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);

	dsi_28nm_phy_regulator_ctrl(phy, true);

	dsi_28nm_dphy_set_timing(phy, timing);

	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);

	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);

	for (i = 0; i < 4; i++) {
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
	}

	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_4, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);

	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);

	val = dsi_phy_read(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL);
	if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE)
		val &= ~DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
	else
		val |= DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
	dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, val);

	return 0;
}

static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
{
	dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
	dsi_28nm_phy_regulator_ctrl(phy, false);

	/*
	 * Wait for the registers writes to complete in order to
	 * ensure that the phy is completely disabled
	 */
	wmb();
}

static const struct regulator_bulk_data dsi_phy_28nm_regulators[] = {
	{ .supply = "vddio", .init_load_uA = 100000 },
};

const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
	.has_phy_regulator = true,
	.regulator_data = dsi_phy_28nm_regulators,
	.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0xfd922b00, 0xfd923100 },
	.num_dsi_phy = 2,
};

const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
	.has_phy_regulator = true,
	.regulator_data = dsi_phy_28nm_regulators,
	.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0x1a94400, 0x1a96400 },
	.num_dsi_phy = 2,
};

const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
	.has_phy_regulator = true,
	.regulator_data = dsi_phy_28nm_regulators,
	.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0x1a98500 },
	.num_dsi_phy = 1,
	.quirks = DSI_PHY_28NM_QUIRK_PHY_LP,
};

const struct msm_dsi_phy_cfg dsi_phy_28nm_8226_cfgs = {
	.has_phy_regulator = true,
	.regulator_data = dsi_phy_28nm_regulators,
	.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
	.ops = {
		.enable = dsi_28nm_phy_enable,
		.disable = dsi_28nm_phy_disable,
		.pll_init = dsi_pll_28nm_init,
		.save_pll_state = dsi_28nm_pll_save_state,
		.restore_pll_state = dsi_28nm_pll_restore_state,
	},
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0xfd922b00 },
	.num_dsi_phy = 1,
	.quirks = DSI_PHY_28NM_QUIRK_PHY_8226,
};