// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017-2018 NXP.
 */

#define pr_fmt(fmt) "pll14xx: " fmt

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

#include "clk.h"

#define GNRL_CTL	0x0
#define DIV_CTL0	0x4
#define DIV_CTL1	0x8
#define LOCK_STATUS	BIT(31)
#define LOCK_SEL_MASK	BIT(29)
#define CLKE_MASK	BIT(11)
#define RST_MASK	BIT(9)
#define BYPASS_MASK	BIT(4)
#define MDIV_MASK	GENMASK(21, 12)
#define PDIV_MASK	GENMASK(9, 4)
#define SDIV_MASK	GENMASK(2, 0)
#define KDIV_MASK	GENMASK(15, 0)
#define KDIV_MIN	SHRT_MIN
#define KDIV_MAX	SHRT_MAX

#define LOCK_TIMEOUT_US		10000

struct clk_pll14xx {
	struct clk_hw			hw;
	void __iomem			*base;
	enum imx_pll14xx_type		type;
	const struct imx_pll14xx_rate_table *rate_table;
	int rate_count;
};

#define to_clk_pll14xx(_hw) container_of(_hw, struct clk_pll14xx, hw)

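/*
 * Pre-computed 1416x settings; the entries below correspond to a 24 MHz
 * reference, e.g. 1800000000 Hz = 24 MHz * 225 / 3 / 2^0 (mdiv / pdiv / 2^sdiv).
 */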
static const struct imx_pll14xx_rate_table imx_pll1416x_tbl[] = {
	PLL_1416X_RATE(1800000000U, 225, 3, 0),
	PLL_1416X_RATE(1600000000U, 200, 3, 0),
	PLL_1416X_RATE(1500000000U, 375, 3, 1),
	PLL_1416X_RATE(1400000000U, 350, 3, 1),
	PLL_1416X_RATE(1200000000U, 300, 3, 1),
	PLL_1416X_RATE(1000000000U, 250, 3, 1),
	PLL_1416X_RATE(800000000U,  200, 3, 1),
	PLL_1416X_RATE(750000000U,  250, 2, 2),
	PLL_1416X_RATE(700000000U,  350, 3, 2),
	PLL_1416X_RATE(640000000U,  320, 3, 2),
	PLL_1416X_RATE(600000000U,  300, 3, 2),
	PLL_1416X_RATE(320000000U,  160, 3, 2),
};

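/*
 * Pre-computed 1443x settings including the fractional kdiv, again for a
 * 24 MHz reference, e.g.
 * 1039500000 Hz = 24 MHz * (173 * 65536 + 16384) / (2 * 65536) / 2^1.
 */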
static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
	PLL_1443X_RATE(1039500000U, 173, 2, 1, 16384),
	PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
	PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
	PLL_1443X_RATE(519750000U, 173, 2, 2, 16384),
};

struct imx_pll14xx_clk imx_1443x_pll = {
	.type = PLL_1443X,
	.rate_table = imx_pll1443x_tbl,
	.rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
};
EXPORT_SYMBOL_GPL(imx_1443x_pll);

struct imx_pll14xx_clk imx_1443x_dram_pll = {
	.type = PLL_1443X,
	.rate_table = imx_pll1443x_tbl,
	.rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
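	/*
	 * The DRAM PLL rate can be changed outside the clock framework
	 * (e.g. by DDR frequency scaling firmware), so never report a
	 * cached rate for it.
	 */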
	.flags = CLK_GET_RATE_NOCACHE,
};
EXPORT_SYMBOL_GPL(imx_1443x_dram_pll);

struct imx_pll14xx_clk imx_1416x_pll = {
	.type = PLL_1416X,
	.rate_table = imx_pll1416x_tbl,
	.rate_count = ARRAY_SIZE(imx_pll1416x_tbl),
};
EXPORT_SYMBOL_GPL(imx_1416x_pll);

static const struct imx_pll14xx_rate_table *imx_get_pll_settings(
		struct clk_pll14xx *pll, unsigned long rate)
{
	const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
	int i;

	for (i = 0; i < pll->rate_count; i++)
		if (rate == rate_table[i].rate)
			return &rate_table[i];

	return NULL;
}

static long pll14xx_calc_rate(struct clk_pll14xx *pll, int mdiv, int pdiv,
			      int sdiv, int kdiv, unsigned long prate)
{
	u64 fout = prate;

	/* fout = (m * 65536 + k) * Fin / (p * 65536) / (1 << sdiv) */
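	/* e.g. prate = 24 MHz, m = 300, p = 3, s = 1, k = 0 -> fout = 1.2 GHz */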
	fout *= (mdiv * 65536 + kdiv);
	pdiv *= 65536;

	do_div(fout, pdiv << sdiv);

	return fout;
}

static long pll1443x_calc_kdiv(int mdiv, int pdiv, int sdiv,
		unsigned long rate, unsigned long prate)
{
	long kdiv;

	/* calc kdiv = round(rate * pdiv * 65536 * 2^sdiv / prate) - (mdiv * 65536) */
	kdiv = ((rate * ((pdiv * 65536) << sdiv) + prate / 2) / prate) - (mdiv * 65536);

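	/* k is a signed 16-bit field, so clamp to [SHRT_MIN, SHRT_MAX] */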
	return clamp_t(short, kdiv, KDIV_MIN, KDIV_MAX);
}

static void imx_pll14xx_calc_settings(struct clk_pll14xx *pll, unsigned long rate,
				      unsigned long prate, struct imx_pll14xx_rate_table *t)
{
	u32 pll_div_ctl0, pll_div_ctl1;
	int mdiv, pdiv, sdiv, kdiv;
	long fout, rate_min, rate_max, dist, best = LONG_MAX;
	const struct imx_pll14xx_rate_table *tt;

	/*
	 * Fractional PLL constraints:
	 *
	 * a) 1 <= p <= 63
	 * b) 64 <= m <= 1023
	 * c) 0 <= s <= 6
	 * d) -32768 <= k <= 32767
	 *
	 * fvco = (m * 65536 + k) * prate / (p * 65536)
	 * fout = (m * 65536 + k) * prate / (p * 65536) / (1 << sdiv)
	 */

	/* First try if we can get the desired rate from one of the static entries */
	tt = imx_get_pll_settings(pll, rate);
	if (tt) {
		pr_debug("%s: in=%ld, want=%ld, Using PLL setting from table\n",
			 clk_hw_get_name(&pll->hw), prate, rate);
		t->rate = tt->rate;
		t->mdiv = tt->mdiv;
		t->pdiv = tt->pdiv;
		t->sdiv = tt->sdiv;
		t->kdiv = tt->kdiv;
		return;
	}

	pll_div_ctl0 = readl_relaxed(pll->base + DIV_CTL0);
	mdiv = FIELD_GET(MDIV_MASK, pll_div_ctl0);
	pdiv = FIELD_GET(PDIV_MASK, pll_div_ctl0);
	sdiv = FIELD_GET(SDIV_MASK, pll_div_ctl0);
	pll_div_ctl1 = readl_relaxed(pll->base + DIV_CTL1);

	/* Then see if we can get the desired rate by only adjusting kdiv (glitch free) */
	rate_min = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, KDIV_MIN, prate);
	rate_max = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, KDIV_MAX, prate);

	if (rate >= rate_min && rate <= rate_max) {
		kdiv = pll1443x_calc_kdiv(mdiv, pdiv, sdiv, rate, prate);
		pr_debug("%s: in=%ld, want=%ld Only adjust kdiv %ld -> %d\n",
			 clk_hw_get_name(&pll->hw), prate, rate,
			 FIELD_GET(KDIV_MASK, pll_div_ctl1), kdiv);
		fout = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, kdiv, prate);
		t->rate = (unsigned int)fout;
		t->mdiv = mdiv;
		t->pdiv = pdiv;
		t->sdiv = sdiv;
		t->kdiv = kdiv;
		return;
	}

	/* Finally calculate best values */
	for (pdiv = 1; pdiv <= 63; pdiv++) {
		for (sdiv = 0; sdiv <= 6; sdiv++) {
			/* calc mdiv = round(rate * pdiv * 2^sdiv / prate) */
			mdiv = DIV_ROUND_CLOSEST(rate * (pdiv << sdiv), prate);
			mdiv = clamp(mdiv, 64, 1023);

			kdiv = pll1443x_calc_kdiv(mdiv, pdiv, sdiv, rate, prate);
			fout = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, kdiv, prate);

			/* best match */
			dist = abs((long)rate - (long)fout);
			if (dist < best) {
				best = dist;
				t->rate = (unsigned int)fout;
				t->mdiv = mdiv;
				t->pdiv = pdiv;
				t->sdiv = sdiv;
				t->kdiv = kdiv;

				if (!dist)
					goto found;
			}
		}
	}
found:
	pr_debug("%s: in=%ld, want=%ld got=%d (pdiv=%d sdiv=%d mdiv=%d kdiv=%d)\n",
		 clk_hw_get_name(&pll->hw), prate, rate, t->rate, t->pdiv, t->sdiv,
		 t->mdiv, t->kdiv);
}

static long clk_pll1416x_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
	int i;

	/* Assuming rate_table is in descending order */
	for (i = 0; i < pll->rate_count; i++)
		if (rate >= rate_table[i].rate)
			return rate_table[i].rate;

	/* return minimum supported value */
	return rate_table[pll->rate_count - 1].rate;
}

static long clk_pll1443x_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	struct imx_pll14xx_rate_table t;

	imx_pll14xx_calc_settings(pll, rate, *prate, &t);

	return t.rate;
}

static unsigned long clk_pll14xx_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 mdiv, pdiv, sdiv, kdiv, pll_div_ctl0, pll_div_ctl1;

	pll_div_ctl0 = readl_relaxed(pll->base + DIV_CTL0);
	mdiv = FIELD_GET(MDIV_MASK, pll_div_ctl0);
	pdiv = FIELD_GET(PDIV_MASK, pll_div_ctl0);
	sdiv = FIELD_GET(SDIV_MASK, pll_div_ctl0);

	if (pll->type == PLL_1443X) {
		pll_div_ctl1 = readl_relaxed(pll->base + DIV_CTL1);
		kdiv = (s16)FIELD_GET(KDIV_MASK, pll_div_ctl1);
	} else {
		kdiv = 0;
	}

	return pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, kdiv, parent_rate);
}

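/*
 * Changing mdiv or pdiv requires the full reset-and-relock sequence, while
 * sdiv and/or kdiv alone can be updated without resetting the PLL.
 */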
static inline bool clk_pll14xx_mp_change(const struct imx_pll14xx_rate_table *rate,
					  u32 pll_div)
{
	u32 old_mdiv, old_pdiv;

	old_mdiv = FIELD_GET(MDIV_MASK, pll_div);
	old_pdiv = FIELD_GET(PDIV_MASK, pll_div);

	return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv;
}

static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll)
{
	u32 val;

	return readl_poll_timeout(pll->base + GNRL_CTL, val, val & LOCK_STATUS, 0,
			LOCK_TIMEOUT_US);
}

static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
				 unsigned long prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	const struct imx_pll14xx_rate_table *rate;
	u32 tmp, div_val;
	int ret;

	rate = imx_get_pll_settings(pll, drate);
	if (!rate) {
		pr_err("Invalid rate %lu for pll clk %s\n", drate,
		       clk_hw_get_name(hw));
		return -EINVAL;
	}

	tmp = readl_relaxed(pll->base + DIV_CTL0);

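	/* mdiv/pdiv unchanged: only update the post divider (sdiv), no relock needed */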
	if (!clk_pll14xx_mp_change(rate, tmp)) {
		tmp &= ~SDIV_MASK;
		tmp |= FIELD_PREP(SDIV_MASK, rate->sdiv);
		writel_relaxed(tmp, pll->base + DIV_CTL0);

		return 0;
	}

	/* Bypass clock and set lock to pll output lock */
	tmp = readl_relaxed(pll->base + GNRL_CTL);
	tmp |= LOCK_SEL_MASK;
	writel_relaxed(tmp, pll->base + GNRL_CTL);

	/* Enable RST */
	tmp &= ~RST_MASK;
	writel_relaxed(tmp, pll->base + GNRL_CTL);

	/* Enable BYPASS */
	tmp |= BYPASS_MASK;
	writel(tmp, pll->base + GNRL_CTL);

	div_val = FIELD_PREP(MDIV_MASK, rate->mdiv) | FIELD_PREP(PDIV_MASK, rate->pdiv) |
		FIELD_PREP(SDIV_MASK, rate->sdiv);
	writel_relaxed(div_val, pll->base + DIV_CTL0);

	/*
	 * According to SPEC, t3 - t2 needs to be greater than
	 * both 1us and 1/FREF.
	 * FREF is FIN / Prediv; the prediv is in [1, 63], so choose
	 * 3us.
	 */
	udelay(3);

	/* Disable RST */
	tmp |= RST_MASK;
	writel_relaxed(tmp, pll->base + GNRL_CTL);

	/* Wait Lock */
	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	/* Bypass */
	tmp &= ~BYPASS_MASK;
	writel_relaxed(tmp, pll->base + GNRL_CTL);

	return 0;
}

static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
				 unsigned long prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	struct imx_pll14xx_rate_table rate;
	u32 gnrl_ctl, div_ctl0;
	int ret;

	imx_pll14xx_calc_settings(pll, drate, prate, &rate);

	div_ctl0 = readl_relaxed(pll->base + DIV_CTL0);

	if (!clk_pll14xx_mp_change(&rate, div_ctl0)) {
		/* only sdiv and/or kdiv changed - no need to RESET PLL */
		div_ctl0 &= ~SDIV_MASK;
		div_ctl0 |= FIELD_PREP(SDIV_MASK, rate.sdiv);
		writel_relaxed(div_ctl0, pll->base + DIV_CTL0);

		writel_relaxed(FIELD_PREP(KDIV_MASK, rate.kdiv),
			       pll->base + DIV_CTL1);

		return 0;
	}

	/* Enable RST */
	gnrl_ctl = readl_relaxed(pll->base + GNRL_CTL);
	gnrl_ctl &= ~RST_MASK;
	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);

	/* Enable BYPASS */
	gnrl_ctl |= BYPASS_MASK;
	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);

	div_ctl0 = FIELD_PREP(MDIV_MASK, rate.mdiv) |
		   FIELD_PREP(PDIV_MASK, rate.pdiv) |
		   FIELD_PREP(SDIV_MASK, rate.sdiv);
	writel_relaxed(div_ctl0, pll->base + DIV_CTL0);

	writel_relaxed(FIELD_PREP(KDIV_MASK, rate.kdiv), pll->base + DIV_CTL1);

	/*
	 * According to SPEC, t3 - t2 needs to be greater than
	 * both 1us and 1/FREF.
	 * FREF is FIN / Prediv; the prediv is in [1, 63], so choose
	 * 3us.
	 */
	udelay(3);

	/* Disable RST */
	gnrl_ctl |= RST_MASK;
	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);

	/* Wait Lock */
	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	/* Bypass */
	gnrl_ctl &= ~BYPASS_MASK;
	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);

	return 0;
}

static int clk_pll14xx_prepare(struct clk_hw *hw)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 val;
	int ret;

	/*
	 * RESETB = 1 from 0, PLL starts its normal
	 * operation after lock time
	 */
	val = readl_relaxed(pll->base + GNRL_CTL);
	if (val & RST_MASK)
		return 0;
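	/* bypass the PLL while it locks, then switch back to its output */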
	val |= BYPASS_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);
	val |= RST_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);

	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	val &= ~BYPASS_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);

	return 0;
}

static int clk_pll14xx_is_prepared(struct clk_hw *hw)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 val;

	val = readl_relaxed(pll->base + GNRL_CTL);

	return (val & RST_MASK) ? 1 : 0;
}

static void clk_pll14xx_unprepare(struct clk_hw *hw)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 val;

	/*
	 * Set RST to 0, power down mode is enabled and
	 * every digital block is reset
	 */
	val = readl_relaxed(pll->base + GNRL_CTL);
	val &= ~RST_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);
}

static const struct clk_ops clk_pll1416x_ops = {
	.prepare	= clk_pll14xx_prepare,
	.unprepare	= clk_pll14xx_unprepare,
	.is_prepared	= clk_pll14xx_is_prepared,
	.recalc_rate	= clk_pll14xx_recalc_rate,
	.round_rate	= clk_pll1416x_round_rate,
	.set_rate	= clk_pll1416x_set_rate,
};

static const struct clk_ops clk_pll1416x_min_ops = {
	.recalc_rate	= clk_pll14xx_recalc_rate,
};

static const struct clk_ops clk_pll1443x_ops = {
	.prepare	= clk_pll14xx_prepare,
	.unprepare	= clk_pll14xx_unprepare,
	.is_prepared	= clk_pll14xx_is_prepared,
	.recalc_rate	= clk_pll14xx_recalc_rate,
	.round_rate	= clk_pll1443x_round_rate,
	.set_rate	= clk_pll1443x_set_rate,
};

struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
				const char *parent_name, void __iomem *base,
				const struct imx_pll14xx_clk *pll_clk)
{
	struct clk_pll14xx *pll;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;
	u32 val;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.flags = pll_clk->flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	switch (pll_clk->type) {
	case PLL_1416X:
		if (!pll_clk->rate_table)
			init.ops = &clk_pll1416x_min_ops;
		else
			init.ops = &clk_pll1416x_ops;
		break;
	case PLL_1443X:
		init.ops = &clk_pll1443x_ops;
		break;
	default:
		pr_err("Unknown pll type for pll clk %s\n", name);
		kfree(pll);
		return ERR_PTR(-EINVAL);
	}

	pll->base = base;
	pll->hw.init = &init;
	pll->type = pll_clk->type;
	pll->rate_table = pll_clk->rate_table;
	pll->rate_count = pll_clk->rate_count;

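	/* make sure the PLL output, not the bypass path, is selected by default */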
	val = readl_relaxed(pll->base + GNRL_CTL);
	val &= ~BYPASS_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);

	hw = &pll->hw;

	ret = clk_hw_register(dev, hw);
	if (ret) {
		pr_err("failed to register pll %s %d\n", name, ret);
		kfree(pll);
		return ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(imx_dev_clk_hw_pll14xx);
545