xref: /linux/drivers/clk/imx/clk-pll14xx.c (revision 69bfec7548f4c1595bac0e3ddfc0458a5af31f4c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017-2018 NXP.
 */

#define pr_fmt(fmt) "pll14xx: " fmt

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

#include "clk.h"

#define GNRL_CTL	0x0
#define DIV_CTL0	0x4
#define DIV_CTL1	0x8
#define LOCK_STATUS	BIT(31)
#define LOCK_SEL_MASK	BIT(29)
#define CLKE_MASK	BIT(11)
#define RST_MASK	BIT(9)
#define BYPASS_MASK	BIT(4)
#define MDIV_MASK	GENMASK(21, 12)
#define PDIV_MASK	GENMASK(9, 4)
#define SDIV_MASK	GENMASK(2, 0)
#define KDIV_MASK	GENMASK(15, 0)
#define KDIV_MIN	SHRT_MIN
#define KDIV_MAX	SHRT_MAX

#define LOCK_TIMEOUT_US		10000

struct clk_pll14xx {
	struct clk_hw			hw;
	void __iomem			*base;
	enum imx_pll14xx_type		type;
	const struct imx_pll14xx_rate_table *rate_table;
	int rate_count;
};

#define to_clk_pll14xx(_hw) container_of(_hw, struct clk_pll14xx, hw)

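/* Pre-computed mdiv/pdiv/sdiv (and, for 1443x, kdiv) settings for common rates */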
static const struct imx_pll14xx_rate_table imx_pll1416x_tbl[] = {
	PLL_1416X_RATE(1800000000U, 225, 3, 0),
	PLL_1416X_RATE(1600000000U, 200, 3, 0),
	PLL_1416X_RATE(1500000000U, 375, 3, 1),
	PLL_1416X_RATE(1400000000U, 350, 3, 1),
	PLL_1416X_RATE(1200000000U, 300, 3, 1),
	PLL_1416X_RATE(1000000000U, 250, 3, 1),
	PLL_1416X_RATE(800000000U,  200, 3, 1),
	PLL_1416X_RATE(750000000U,  250, 2, 2),
	PLL_1416X_RATE(700000000U,  350, 3, 2),
	PLL_1416X_RATE(640000000U,  320, 3, 2),
	PLL_1416X_RATE(600000000U,  300, 3, 2),
	PLL_1416X_RATE(320000000U,  160, 3, 2),
};

static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
	PLL_1443X_RATE(1039500000U, 173, 2, 1, 16384),
	PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
	PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
	PLL_1443X_RATE(519750000U, 173, 2, 2, 16384),
	PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
	PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
};

struct imx_pll14xx_clk imx_1443x_pll = {
	.type = PLL_1443X,
	.rate_table = imx_pll1443x_tbl,
	.rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
};
EXPORT_SYMBOL_GPL(imx_1443x_pll);

struct imx_pll14xx_clk imx_1443x_dram_pll = {
	.type = PLL_1443X,
	.rate_table = imx_pll1443x_tbl,
	.rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
	.flags = CLK_GET_RATE_NOCACHE,
};
EXPORT_SYMBOL_GPL(imx_1443x_dram_pll);

struct imx_pll14xx_clk imx_1416x_pll = {
	.type = PLL_1416X,
	.rate_table = imx_pll1416x_tbl,
	.rate_count = ARRAY_SIZE(imx_pll1416x_tbl),
};
EXPORT_SYMBOL_GPL(imx_1416x_pll);

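/*
 * Look up the static rate table entry that matches @rate exactly,
 * or return NULL if there is none.
 */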
static const struct imx_pll14xx_rate_table *imx_get_pll_settings(
		struct clk_pll14xx *pll, unsigned long rate)
{
	const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
	int i;

	for (i = 0; i < pll->rate_count; i++)
		if (rate == rate_table[i].rate)
			return &rate_table[i];

	return NULL;
}

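/*
 * Compute the PLL output rate from the divider values:
 *
 *   rate = (mdiv * 65536 + kdiv) * prate / (pdiv * 65536 * 2^sdiv)
 */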
static long pll14xx_calc_rate(struct clk_pll14xx *pll, int mdiv, int pdiv,
			      int sdiv, int kdiv, unsigned long prate)
{
	u64 fvco = prate;

	/* fvco = (m * 65536 + k) * Fin / (p * 65536) */
	fvco *= (mdiv * 65536 + kdiv);
	pdiv *= 65536;

	do_div(fvco, pdiv << sdiv);

	return fvco;
}

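/*
 * Solve the rate equation for kdiv with mdiv, pdiv and sdiv fixed, and
 * clamp the result to the signed 16-bit range of the KDIV field.
 */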
static long pll1443x_calc_kdiv(int mdiv, int pdiv, int sdiv,
		unsigned long rate, unsigned long prate)
{
	long kdiv;

	/* calc kdiv = round(rate * pdiv * 65536 * 2^sdiv / prate) - (mdiv * 65536) */
	kdiv = ((rate * ((pdiv * 65536) << sdiv) + prate / 2) / prate) - (mdiv * 65536);

	return clamp_t(short, kdiv, KDIV_MIN, KDIV_MAX);
}

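/*
 * Work out divider settings for @rate: first try an exact match in the
 * static rate table, then try to reach the rate by adjusting only kdiv
 * (glitch free), and finally search pdiv/sdiv/mdiv/kdiv for the closest
 * achievable rate.
 */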
static void imx_pll14xx_calc_settings(struct clk_pll14xx *pll, unsigned long rate,
				      unsigned long prate, struct imx_pll14xx_rate_table *t)
{
	u32 pll_div_ctl0, pll_div_ctl1;
	int mdiv, pdiv, sdiv, kdiv;
	long fvco, rate_min, rate_max, dist, best = LONG_MAX;
	const struct imx_pll14xx_rate_table *tt;

	/*
	 * Fractional PLL constraints:
	 *
	 * a) 6MHz <= prate <= 25MHz
	 * b) 1 <= p <= 63 (1 <= p <= 4 when prate = 24MHz)
	 * c) 64 <= m <= 1023
	 * d) 0 <= s <= 6
	 * e) -32768 <= k <= 32767
	 *
	 * fvco = (m * 65536 + k) * prate / (p * 65536)
	 */
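
	/*
	 * Example (assuming a 24MHz prate): the 650000000U table entry above
	 * uses m = 325, p = 3, s = 2, k = 0, giving fvco = 325 * 24MHz / 3 =
	 * 2600MHz and an output rate of 2600MHz / 2^2 = 650MHz.
	 */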

	/* First see if we can get the desired rate from one of the static entries */
	tt = imx_get_pll_settings(pll, rate);
	if (tt) {
		pr_debug("%s: in=%ld, want=%ld, Using PLL setting from table\n",
			 clk_hw_get_name(&pll->hw), prate, rate);
		t->rate = tt->rate;
		t->mdiv = tt->mdiv;
		t->pdiv = tt->pdiv;
		t->sdiv = tt->sdiv;
		t->kdiv = tt->kdiv;
		return;
	}

	pll_div_ctl0 = readl_relaxed(pll->base + DIV_CTL0);
	mdiv = FIELD_GET(MDIV_MASK, pll_div_ctl0);
	pdiv = FIELD_GET(PDIV_MASK, pll_div_ctl0);
	sdiv = FIELD_GET(SDIV_MASK, pll_div_ctl0);
	pll_div_ctl1 = readl_relaxed(pll->base + DIV_CTL1);

	/* Then see if we can get the desired rate by only adjusting kdiv (glitch free) */
	rate_min = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, KDIV_MIN, prate);
	rate_max = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, KDIV_MAX, prate);

	if (rate >= rate_min && rate <= rate_max) {
		kdiv = pll1443x_calc_kdiv(mdiv, pdiv, sdiv, rate, prate);
		pr_debug("%s: in=%ld, want=%ld Only adjust kdiv %ld -> %d\n",
			 clk_hw_get_name(&pll->hw), prate, rate,
			 FIELD_GET(KDIV_MASK, pll_div_ctl1), kdiv);
		fvco = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, kdiv, prate);
		t->rate = (unsigned int)fvco;
		t->mdiv = mdiv;
		t->pdiv = pdiv;
		t->sdiv = sdiv;
		t->kdiv = kdiv;
		return;
	}

	/* Finally calculate best values */
	for (pdiv = 1; pdiv <= 7; pdiv++) {
		for (sdiv = 0; sdiv <= 6; sdiv++) {
			/* calc mdiv = round(rate * pdiv * 2^sdiv / prate) */
			mdiv = DIV_ROUND_CLOSEST(rate * (pdiv << sdiv), prate);
			mdiv = clamp(mdiv, 64, 1023);

			kdiv = pll1443x_calc_kdiv(mdiv, pdiv, sdiv, rate, prate);
			fvco = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, kdiv, prate);

			/* best match */
			dist = abs((long)rate - (long)fvco);
			if (dist < best) {
				best = dist;
				t->rate = (unsigned int)fvco;
				t->mdiv = mdiv;
				t->pdiv = pdiv;
				t->sdiv = sdiv;
				t->kdiv = kdiv;

				if (!dist)
					goto found;
			}
		}
	}
found:
	pr_debug("%s: in=%ld, want=%ld got=%d (pdiv=%d sdiv=%d mdiv=%d kdiv=%d)\n",
		 clk_hw_get_name(&pll->hw), prate, rate, t->rate, t->pdiv, t->sdiv,
		 t->mdiv, t->kdiv);
}

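/*
 * The 1416x PLL only supports the rates in its static table: round down
 * to the nearest table entry, or to the smallest entry if the request is
 * below all of them.
 */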
static long clk_pll1416x_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
	int i;

	/* Assuming rate_table is in descending order */
	for (i = 0; i < pll->rate_count; i++)
		if (rate >= rate_table[i].rate)
			return rate_table[i].rate;

	/* return minimum supported value */
	return rate_table[pll->rate_count - 1].rate;
}

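/* Round by computing the settings the 1443x PLL would actually use. */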
static long clk_pll1443x_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	struct imx_pll14xx_rate_table t;

	imx_pll14xx_calc_settings(pll, rate, *prate, &t);

	return t.rate;
}

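/* Read the divider fields back from the hardware and compute the current rate. */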
static unsigned long clk_pll14xx_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 mdiv, pdiv, sdiv, kdiv, pll_div_ctl0, pll_div_ctl1;

	pll_div_ctl0 = readl_relaxed(pll->base + DIV_CTL0);
	mdiv = FIELD_GET(MDIV_MASK, pll_div_ctl0);
	pdiv = FIELD_GET(PDIV_MASK, pll_div_ctl0);
	sdiv = FIELD_GET(SDIV_MASK, pll_div_ctl0);

	if (pll->type == PLL_1443X) {
		pll_div_ctl1 = readl_relaxed(pll->base + DIV_CTL1);
		kdiv = (s16)FIELD_GET(KDIV_MASK, pll_div_ctl1);
	} else {
		kdiv = 0;
	}

	return pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, kdiv, parent_rate);
}

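/*
 * Check whether mdiv or pdiv differ from what is currently programmed;
 * if so the PLL has to go through the full reset sequence, otherwise
 * only sdiv (and kdiv) need updating.
 */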
static inline bool clk_pll14xx_mp_change(const struct imx_pll14xx_rate_table *rate,
					  u32 pll_div)
{
	u32 old_mdiv, old_pdiv;

	old_mdiv = FIELD_GET(MDIV_MASK, pll_div);
	old_pdiv = FIELD_GET(PDIV_MASK, pll_div);

	return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv;
}

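/* Poll GNRL_CTL until the lock bit is set, or time out after 10ms. */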
static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll)
{
	u32 val;

	return readl_poll_timeout(pll->base + GNRL_CTL, val, val & LOCK_STATUS, 0,
			LOCK_TIMEOUT_US);
}

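/*
 * Set a 1416x PLL to one of its table rates: if only sdiv changes, just
 * update DIV_CTL0; otherwise reset the PLL, program the new dividers,
 * release the reset and wait for lock before removing the bypass.
 */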
static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
				 unsigned long prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	const struct imx_pll14xx_rate_table *rate;
	u32 tmp, div_val;
	int ret;

	rate = imx_get_pll_settings(pll, drate);
	if (!rate) {
		pr_err("Invalid rate %lu for pll clk %s\n", drate,
		       clk_hw_get_name(hw));
		return -EINVAL;
	}

	tmp = readl_relaxed(pll->base + DIV_CTL0);

	if (!clk_pll14xx_mp_change(rate, tmp)) {
		tmp &= ~SDIV_MASK;
		tmp |= FIELD_PREP(SDIV_MASK, rate->sdiv);
		writel_relaxed(tmp, pll->base + DIV_CTL0);

		return 0;
	}

	/* Bypass clock and set lock to pll output lock */
	tmp = readl_relaxed(pll->base + GNRL_CTL);
	tmp |= LOCK_SEL_MASK;
	writel_relaxed(tmp, pll->base + GNRL_CTL);

	/* Enable RST */
	tmp &= ~RST_MASK;
	writel_relaxed(tmp, pll->base + GNRL_CTL);

	/* Enable BYPASS */
	tmp |= BYPASS_MASK;
	writel(tmp, pll->base + GNRL_CTL);

	div_val = FIELD_PREP(MDIV_MASK, rate->mdiv) | FIELD_PREP(PDIV_MASK, rate->pdiv) |
		FIELD_PREP(SDIV_MASK, rate->sdiv);
	writel_relaxed(div_val, pll->base + DIV_CTL0);

	/*
	 * According to the spec, t3 - t2 needs to be greater than
	 * both 1us and 1/FREF.
	 * FREF is FIN / Prediv; the prediv range is [1, 63], so
	 * choose 3us.
	 */
	udelay(3);

	/* Disable RST */
	tmp |= RST_MASK;
	writel_relaxed(tmp, pll->base + GNRL_CTL);

	/* Wait Lock */
	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	/* Bypass */
	tmp &= ~BYPASS_MASK;
	writel_relaxed(tmp, pll->base + GNRL_CTL);

	return 0;
}

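/*
 * Set a 1443x PLL rate from the computed settings: sdiv/kdiv-only changes
 * are applied directly, anything touching mdiv or pdiv goes through the
 * full reset, program, relock and un-bypass sequence.
 */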
static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
				 unsigned long prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	struct imx_pll14xx_rate_table rate;
	u32 gnrl_ctl, div_ctl0;
	int ret;

	imx_pll14xx_calc_settings(pll, drate, prate, &rate);

	div_ctl0 = readl_relaxed(pll->base + DIV_CTL0);

	if (!clk_pll14xx_mp_change(&rate, div_ctl0)) {
		/* only sdiv and/or kdiv changed - no need to RESET PLL */
		div_ctl0 &= ~SDIV_MASK;
		div_ctl0 |= FIELD_PREP(SDIV_MASK, rate.sdiv);
		writel_relaxed(div_ctl0, pll->base + DIV_CTL0);

		writel_relaxed(FIELD_PREP(KDIV_MASK, rate.kdiv),
			       pll->base + DIV_CTL1);

		return 0;
	}

	/* Enable RST */
	gnrl_ctl = readl_relaxed(pll->base + GNRL_CTL);
	gnrl_ctl &= ~RST_MASK;
	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);

	/* Enable BYPASS */
	gnrl_ctl |= BYPASS_MASK;
	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);

	div_ctl0 = FIELD_PREP(MDIV_MASK, rate.mdiv) |
		   FIELD_PREP(PDIV_MASK, rate.pdiv) |
		   FIELD_PREP(SDIV_MASK, rate.sdiv);
	writel_relaxed(div_ctl0, pll->base + DIV_CTL0);

	writel_relaxed(FIELD_PREP(KDIV_MASK, rate.kdiv), pll->base + DIV_CTL1);

	/*
	 * According to the spec, t3 - t2 needs to be greater than
	 * both 1us and 1/FREF.
	 * FREF is FIN / Prediv; the prediv range is [1, 63], so
	 * choose 3us.
	 */
	udelay(3);

	/* Disable RST */
	gnrl_ctl |= RST_MASK;
	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);

	/* Wait Lock */
	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	/* Bypass */
	gnrl_ctl &= ~BYPASS_MASK;
	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);

	return 0;
}

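/* Power up the PLL behind the bypass, wait for lock, then remove the bypass. */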
static int clk_pll14xx_prepare(struct clk_hw *hw)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 val;
	int ret;

	/*
	 * Setting RESETB from 0 to 1 starts normal PLL operation
	 * after the lock time.
	 */
	val = readl_relaxed(pll->base + GNRL_CTL);
	if (val & RST_MASK)
		return 0;
	val |= BYPASS_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);
	val |= RST_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);

	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	val &= ~BYPASS_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);

	return 0;
}

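/* The PLL is considered prepared when it is not held in reset. */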
static int clk_pll14xx_is_prepared(struct clk_hw *hw)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 val;

	val = readl_relaxed(pll->base + GNRL_CTL);

	return (val & RST_MASK) ? 1 : 0;
}

static void clk_pll14xx_unprepare(struct clk_hw *hw)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 val;

	/*
	 * Setting RST to 0 enables power-down mode and resets
	 * every digital block.
	 */
	val = readl_relaxed(pll->base + GNRL_CTL);
	val &= ~RST_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);
}

static const struct clk_ops clk_pll1416x_ops = {
	.prepare	= clk_pll14xx_prepare,
	.unprepare	= clk_pll14xx_unprepare,
	.is_prepared	= clk_pll14xx_is_prepared,
	.recalc_rate	= clk_pll14xx_recalc_rate,
	.round_rate	= clk_pll1416x_round_rate,
	.set_rate	= clk_pll1416x_set_rate,
};

static const struct clk_ops clk_pll1416x_min_ops = {
	.recalc_rate	= clk_pll14xx_recalc_rate,
};

static const struct clk_ops clk_pll1443x_ops = {
	.prepare	= clk_pll14xx_prepare,
	.unprepare	= clk_pll14xx_unprepare,
	.is_prepared	= clk_pll14xx_is_prepared,
	.recalc_rate	= clk_pll14xx_recalc_rate,
	.round_rate	= clk_pll1443x_round_rate,
	.set_rate	= clk_pll1443x_set_rate,
};

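/*
 * Register a pll14xx clock: allocate the clock, pick the clk_ops that
 * match the PLL type, clear the bypass bit and register the clk_hw.
 */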
struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
				const char *parent_name, void __iomem *base,
				const struct imx_pll14xx_clk *pll_clk)
{
	struct clk_pll14xx *pll;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;
	u32 val;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.flags = pll_clk->flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	switch (pll_clk->type) {
	case PLL_1416X:
		if (!pll_clk->rate_table)
			init.ops = &clk_pll1416x_min_ops;
		else
			init.ops = &clk_pll1416x_ops;
		break;
	case PLL_1443X:
		init.ops = &clk_pll1443x_ops;
		break;
	default:
		pr_err("Unknown pll type for pll clk %s\n", name);
		kfree(pll);
		return ERR_PTR(-EINVAL);
	}

	pll->base = base;
	pll->hw.init = &init;
	pll->type = pll_clk->type;
	pll->rate_table = pll_clk->rate_table;
	pll->rate_count = pll_clk->rate_count;

	val = readl_relaxed(pll->base + GNRL_CTL);
	val &= ~BYPASS_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);

	hw = &pll->hw;

	ret = clk_hw_register(dev, hw);
	if (ret) {
		pr_err("failed to register pll %s %d\n", name, ret);
		kfree(pll);
		return ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(imx_dev_clk_hw_pll14xx);