// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017-2018 NXP.
 */

#define pr_fmt(fmt) "pll14xx: " fmt

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

#include "clk.h"

#define GNRL_CTL	0x0
#define DIV_CTL0	0x4
#define DIV_CTL1	0x8
#define LOCK_STATUS	BIT(31)
#define LOCK_SEL_MASK	BIT(29)
#define CLKE_MASK	BIT(11)
#define RST_MASK	BIT(9)
#define BYPASS_MASK	BIT(4)
#define MDIV_MASK	GENMASK(21, 12)
#define PDIV_MASK	GENMASK(9, 4)
#define SDIV_MASK	GENMASK(2, 0)
#define KDIV_MASK	GENMASK(15, 0)
#define KDIV_MIN	SHRT_MIN
#define KDIV_MAX	SHRT_MAX

#define LOCK_TIMEOUT_US		10000

struct clk_pll14xx {
	struct clk_hw			hw;
	void __iomem			*base;
	enum imx_pll14xx_type		type;
	const struct imx_pll14xx_rate_table *rate_table;
	int rate_count;
};

#define to_clk_pll14xx(_hw) container_of(_hw, struct clk_pll14xx, hw)

static const struct imx_pll14xx_rate_table imx_pll1416x_tbl[] = {
	PLL_1416X_RATE(1800000000U, 225, 3, 0),
	PLL_1416X_RATE(1600000000U, 200, 3, 0),
	PLL_1416X_RATE(1500000000U, 375, 3, 1),
	PLL_1416X_RATE(1400000000U, 350, 3, 1),
	PLL_1416X_RATE(1200000000U, 300, 3, 1),
	PLL_1416X_RATE(1000000000U, 250, 3, 1),
	PLL_1416X_RATE(800000000U, 200, 3, 1),
	PLL_1416X_RATE(750000000U, 250, 2, 2),
	PLL_1416X_RATE(700000000U, 350, 3, 2),
	PLL_1416X_RATE(640000000U, 320, 3, 2),
	PLL_1416X_RATE(600000000U, 300, 3, 2),
	PLL_1416X_RATE(416000000U, 208, 3, 2),
	PLL_1416X_RATE(320000000U, 160, 3, 2),
	PLL_1416X_RATE(208000000U, 208, 3, 3),
};

static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
	PLL_1443X_RATE(1039500000U, 173, 2, 1, 16384),
	PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
	PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
	PLL_1443X_RATE(519750000U, 173, 2, 2, 16384),
};

struct imx_pll14xx_clk imx_1443x_pll = {
	.type = PLL_1443X,
	.rate_table = imx_pll1443x_tbl,
	.rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
};
EXPORT_SYMBOL_GPL(imx_1443x_pll);

struct imx_pll14xx_clk imx_1443x_dram_pll = {
	.type = PLL_1443X,
	.rate_table = imx_pll1443x_tbl,
	.rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
	.flags = CLK_GET_RATE_NOCACHE,
};
EXPORT_SYMBOL_GPL(imx_1443x_dram_pll);

struct imx_pll14xx_clk imx_1416x_pll = {
	.type = PLL_1416X,
	.rate_table = imx_pll1416x_tbl,
	.rate_count = ARRAY_SIZE(imx_pll1416x_tbl),
};
EXPORT_SYMBOL_GPL(imx_1416x_pll);

static const struct imx_pll14xx_rate_table *imx_get_pll_settings(
		struct clk_pll14xx *pll, unsigned long rate)
{
	const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
	int i;

	for (i = 0; i < pll->rate_count; i++)
		if (rate == rate_table[i].rate)
			return &rate_table[i];

	return NULL;
}

static long pll14xx_calc_rate(struct clk_pll14xx *pll, int mdiv, int pdiv,
			      int sdiv, int kdiv, unsigned long prate)
{
	u64 fout = prate;

	/* fout = (m * 65536 + k) * Fin / (p * 65536) / (1 << sdiv) */
	fout *= (mdiv * 65536 + kdiv);
	pdiv *= 65536;

	do_div(fout, pdiv << sdiv);

	return fout;
}
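
/*
 * Worked example of the fout formula above (illustrative only; a 24 MHz
 * reference input is assumed here, the input rate is not fixed by this
 * file): the first imx_pll1443x_tbl entry uses m = 173, p = 2, s = 1,
 * k = 16384, so
 *
 *	fout = (173 * 65536 + 16384) * 24000000 / (2 * 65536) / (1 << 1)
 *	     = 11354112 * 24000000 / 131072 / 2
 *	     = 1039500000 Hz
 *
 * which matches the 1039.5 MHz rate recorded in that table entry.
 */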

static long pll1443x_calc_kdiv(int mdiv, int pdiv, int sdiv,
			       unsigned long rate, unsigned long prate)
{
	long kdiv;

	/* calc kdiv = round(rate * pdiv * 65536 * 2^sdiv / prate) - (mdiv * 65536) */
	kdiv = ((rate * ((pdiv * 65536) << sdiv) + prate / 2) / prate) - (mdiv * 65536);

	return clamp_t(short, kdiv, KDIV_MIN, KDIV_MAX);
}

static void imx_pll14xx_calc_settings(struct clk_pll14xx *pll, unsigned long rate,
				      unsigned long prate, struct imx_pll14xx_rate_table *t)
{
	u32 pll_div_ctl0, pll_div_ctl1;
	int mdiv, pdiv, sdiv, kdiv;
	long fout, rate_min, rate_max, dist, best = LONG_MAX;
	const struct imx_pll14xx_rate_table *tt;

	/*
	 * Fractional PLL constraints:
	 *
	 * a) 1 <= p <= 63
	 * b) 64 <= m <= 1023
	 * c) 0 <= s <= 6
	 * d) -32768 <= k <= 32767
	 *
	 * fvco = (m * 65536 + k) * prate / (p * 65536)
	 * fout = (m * 65536 + k) * prate / (p * 65536) / (1 << sdiv)
	 */

	/* First try if we can get the desired rate from one of the static entries */
	tt = imx_get_pll_settings(pll, rate);
	if (tt) {
		pr_debug("%s: in=%ld, want=%ld, Using PLL setting from table\n",
			 clk_hw_get_name(&pll->hw), prate, rate);
		t->rate = tt->rate;
		t->mdiv = tt->mdiv;
		t->pdiv = tt->pdiv;
		t->sdiv = tt->sdiv;
		t->kdiv = tt->kdiv;
		return;
	}

	pll_div_ctl0 = readl_relaxed(pll->base + DIV_CTL0);
	mdiv = FIELD_GET(MDIV_MASK, pll_div_ctl0);
	pdiv = FIELD_GET(PDIV_MASK, pll_div_ctl0);
	sdiv = FIELD_GET(SDIV_MASK, pll_div_ctl0);
	pll_div_ctl1 = readl_relaxed(pll->base + DIV_CTL1);

	/* Then see if we can get the desired rate by only adjusting kdiv (glitch free) */
	rate_min = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, KDIV_MIN, prate);
	rate_max = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, KDIV_MAX, prate);

	if (rate >= rate_min && rate <= rate_max) {
		kdiv = pll1443x_calc_kdiv(mdiv, pdiv, sdiv, rate, prate);
		pr_debug("%s: in=%ld, want=%ld Only adjust kdiv %ld -> %d\n",
			 clk_hw_get_name(&pll->hw), prate, rate,
			 FIELD_GET(KDIV_MASK, pll_div_ctl1), kdiv);
		fout = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, kdiv, prate);
		t->rate = (unsigned int)fout;
		t->mdiv = mdiv;
		t->pdiv = pdiv;
		t->sdiv = sdiv;
		t->kdiv = kdiv;
		return;
	}

	/* Finally calculate best values */
	for (pdiv = 1; pdiv <= 63; pdiv++) {
		for (sdiv = 0; sdiv <= 6; sdiv++) {
			/* calc mdiv = round(rate * pdiv * 2^sdiv / prate) */
			mdiv = DIV_ROUND_CLOSEST(rate * (pdiv << sdiv), prate);
			mdiv = clamp(mdiv, 64, 1023);

			kdiv = pll1443x_calc_kdiv(mdiv, pdiv, sdiv, rate, prate);
			fout = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, kdiv, prate);

			/* best match */
			dist = abs((long)rate - (long)fout);
			if (dist < best) {
				best = dist;
				t->rate = (unsigned int)fout;
				t->mdiv = mdiv;
				t->pdiv = pdiv;
				t->sdiv = sdiv;
				t->kdiv = kdiv;

				if (!dist)
					goto found;
			}
		}
	}
found:
	pr_debug("%s: in=%ld, want=%ld got=%d (pdiv=%d sdiv=%d mdiv=%d kdiv=%d)\n",
		 clk_hw_get_name(&pll->hw), prate, rate, t->rate, t->pdiv, t->sdiv,
		 t->mdiv, t->kdiv);
}
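
/*
 * Illustrative sketch of the glitch-free path above (numbers assume a
 * 24 MHz reference and the 1039.5 MHz table setting m = 173, p = 2, s = 1
 * already programmed): requesting 1039000000 Hz gives
 *
 *	kdiv = round(1039000000 * 2 * 65536 * 2 / 24000000) - 173 * 65536
 *	     = 11348651 - 11337728 = 10923
 *
 * which is within [-32768, 32767], so mdiv/pdiv stay the same and
 * clk_pll1443x_set_rate() below can update the dividers without
 * resetting the PLL.
 */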

static int clk_pll1416x_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
	int i;

	/* Assuming rate_table is in descending order */
	for (i = 0; i < pll->rate_count; i++)
		if (req->rate >= rate_table[i].rate) {
			req->rate = rate_table[i].rate;

			return 0;
		}

	/* return minimum supported value */
	req->rate = rate_table[pll->rate_count - 1].rate;

	return 0;
}

static int clk_pll1443x_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	struct imx_pll14xx_rate_table t;

	imx_pll14xx_calc_settings(pll, req->rate, req->best_parent_rate, &t);

	req->rate = t.rate;

	return 0;
}

static unsigned long clk_pll14xx_recalc_rate(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 mdiv, pdiv, sdiv, kdiv, pll_div_ctl0, pll_div_ctl1;

	pll_div_ctl0 = readl_relaxed(pll->base + DIV_CTL0);
	mdiv = FIELD_GET(MDIV_MASK, pll_div_ctl0);
	pdiv = FIELD_GET(PDIV_MASK, pll_div_ctl0);
	sdiv = FIELD_GET(SDIV_MASK, pll_div_ctl0);

	if (pll->type == PLL_1443X) {
		pll_div_ctl1 = readl_relaxed(pll->base + DIV_CTL1);
		kdiv = (s16)FIELD_GET(KDIV_MASK, pll_div_ctl1);
	} else {
		kdiv = 0;
	}

	return pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, kdiv, parent_rate);
}

static inline bool clk_pll14xx_mp_change(const struct imx_pll14xx_rate_table *rate,
					 u32 pll_div)
{
	u32 old_mdiv, old_pdiv;

	old_mdiv = FIELD_GET(MDIV_MASK, pll_div);
	old_pdiv = FIELD_GET(PDIV_MASK, pll_div);

	return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv;
}

static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll)
{
	u32 val;

	return readl_poll_timeout(pll->base + GNRL_CTL, val, val & LOCK_STATUS, 0,
				  LOCK_TIMEOUT_US);
}
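
/*
 * Summary of the re-lock sequence used by both set_rate implementations
 * below (descriptive, derived from the code itself): if mdiv/pdiv change,
 * assert reset and bypass the PLL, program the new dividers, wait at least
 * 3 us, release reset, poll LOCK_STATUS via clk_pll14xx_wait_lock() and
 * finally drop the bypass. If only sdiv (and, for the 1443x, kdiv) changes,
 * the dividers are updated on the fly without resetting the PLL.
 */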

static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
				 unsigned long prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	const struct imx_pll14xx_rate_table *rate;
	u32 tmp, div_val;
	int ret;

	rate = imx_get_pll_settings(pll, drate);
	if (!rate) {
		pr_err("Invalid rate %lu for pll clk %s\n", drate,
		       clk_hw_get_name(hw));
		return -EINVAL;
	}

	tmp = readl_relaxed(pll->base + DIV_CTL0);

	if (!clk_pll14xx_mp_change(rate, tmp)) {
		tmp &= ~SDIV_MASK;
		tmp |= FIELD_PREP(SDIV_MASK, rate->sdiv);
		writel_relaxed(tmp, pll->base + DIV_CTL0);

		return 0;
	}

	/* Bypass clock and set lock to pll output lock */
	tmp = readl_relaxed(pll->base + GNRL_CTL);
	tmp |= LOCK_SEL_MASK;
	writel_relaxed(tmp, pll->base + GNRL_CTL);

	/* Enable RST */
	tmp &= ~RST_MASK;
	writel_relaxed(tmp, pll->base + GNRL_CTL);

	/* Enable BYPASS */
	tmp |= BYPASS_MASK;
	writel(tmp, pll->base + GNRL_CTL);

	div_val = FIELD_PREP(MDIV_MASK, rate->mdiv) | FIELD_PREP(PDIV_MASK, rate->pdiv) |
		FIELD_PREP(SDIV_MASK, rate->sdiv);
	writel_relaxed(div_val, pll->base + DIV_CTL0);

	/*
	 * According to SPEC, t3 - t2 need to be greater than
	 * 1us and 1/FREF, respectively.
	 * FREF is FIN / Prediv, the prediv is [1, 63], so choose
	 * 3us.
	 */
	udelay(3);

	/* Disable RST */
	tmp |= RST_MASK;
	writel_relaxed(tmp, pll->base + GNRL_CTL);

	/* Wait Lock */
	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	/* Bypass */
	tmp &= ~BYPASS_MASK;
	writel_relaxed(tmp, pll->base + GNRL_CTL);

	return 0;
}

static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
				 unsigned long prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	struct imx_pll14xx_rate_table rate;
	u32 gnrl_ctl, div_ctl0;
	int ret;

	imx_pll14xx_calc_settings(pll, drate, prate, &rate);

	div_ctl0 = readl_relaxed(pll->base + DIV_CTL0);

	if (!clk_pll14xx_mp_change(&rate, div_ctl0)) {
		/* only sdiv and/or kdiv changed - no need to RESET PLL */
		div_ctl0 &= ~SDIV_MASK;
		div_ctl0 |= FIELD_PREP(SDIV_MASK, rate.sdiv);
		writel_relaxed(div_ctl0, pll->base + DIV_CTL0);

		writel_relaxed(FIELD_PREP(KDIV_MASK, rate.kdiv),
			       pll->base + DIV_CTL1);

		return 0;
	}

	/* Enable RST */
	gnrl_ctl = readl_relaxed(pll->base + GNRL_CTL);
	gnrl_ctl &= ~RST_MASK;
	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);

	/* Enable BYPASS */
	gnrl_ctl |= BYPASS_MASK;
	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);

	div_ctl0 = FIELD_PREP(MDIV_MASK, rate.mdiv) |
		   FIELD_PREP(PDIV_MASK, rate.pdiv) |
		   FIELD_PREP(SDIV_MASK, rate.sdiv);
	writel_relaxed(div_ctl0, pll->base + DIV_CTL0);

	writel_relaxed(FIELD_PREP(KDIV_MASK, rate.kdiv), pll->base + DIV_CTL1);

	/*
	 * According to SPEC, t3 - t2 need to be greater than
	 * 1us and 1/FREF, respectively.
	 * FREF is FIN / Prediv, the prediv is [1, 63], so choose
	 * 3us.
	 */
	udelay(3);

	/* Disable RST */
	gnrl_ctl |= RST_MASK;
	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);

	/* Wait Lock */
	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	/* Bypass */
	gnrl_ctl &= ~BYPASS_MASK;
	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);

	return 0;
}
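
/*
 * Note (derived from the code below, not from the datasheet): the
 * prepare/unprepare callbacks only toggle the RST bit. Setting it takes
 * the PLL out of power-down so it can lock; clearing it enables power-down
 * mode and resets the digital blocks. CLKE_MASK is defined above but is
 * not touched in this file.
 */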

static int clk_pll14xx_prepare(struct clk_hw *hw)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 val;
	int ret;

	/*
	 * RESETB = 1 from 0, PLL starts its normal
	 * operation after lock time
	 */
	val = readl_relaxed(pll->base + GNRL_CTL);
	if (val & RST_MASK)
		return 0;
	val |= BYPASS_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);
	val |= RST_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);

	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	val &= ~BYPASS_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);

	return 0;
}

static int clk_pll14xx_is_prepared(struct clk_hw *hw)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 val;

	val = readl_relaxed(pll->base + GNRL_CTL);

	return (val & RST_MASK) ? 1 : 0;
}

static void clk_pll14xx_unprepare(struct clk_hw *hw)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 val;

	/*
	 * Set RST to 0, power down mode is enabled and
	 * every digital block is reset
	 */
	val = readl_relaxed(pll->base + GNRL_CTL);
	val &= ~RST_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);
}

static const struct clk_ops clk_pll1416x_ops = {
	.prepare	= clk_pll14xx_prepare,
	.unprepare	= clk_pll14xx_unprepare,
	.is_prepared	= clk_pll14xx_is_prepared,
	.recalc_rate	= clk_pll14xx_recalc_rate,
	.determine_rate	= clk_pll1416x_determine_rate,
	.set_rate	= clk_pll1416x_set_rate,
};

static const struct clk_ops clk_pll1416x_min_ops = {
	.recalc_rate	= clk_pll14xx_recalc_rate,
};

static const struct clk_ops clk_pll1443x_ops = {
	.prepare	= clk_pll14xx_prepare,
	.unprepare	= clk_pll14xx_unprepare,
	.is_prepared	= clk_pll14xx_is_prepared,
	.recalc_rate	= clk_pll14xx_recalc_rate,
	.determine_rate	= clk_pll1443x_determine_rate,
	.set_rate	= clk_pll1443x_set_rate,
};

struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
				      const char *parent_name, void __iomem *base,
				      const struct imx_pll14xx_clk *pll_clk)
{
	struct clk_pll14xx *pll;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;
	u32 val;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.flags = pll_clk->flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	switch (pll_clk->type) {
	case PLL_1416X:
		if (!pll_clk->rate_table)
			init.ops = &clk_pll1416x_min_ops;
		else
			init.ops = &clk_pll1416x_ops;
		break;
	case PLL_1443X:
		init.ops = &clk_pll1443x_ops;
		break;
	default:
		pr_err("Unknown pll type for pll clk %s\n", name);
		kfree(pll);
		return ERR_PTR(-EINVAL);
	}

	pll->base = base;
	pll->hw.init = &init;
	pll->type = pll_clk->type;
	pll->rate_table = pll_clk->rate_table;
	pll->rate_count = pll_clk->rate_count;

	val = readl_relaxed(pll->base + GNRL_CTL);
	val &= ~BYPASS_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);

	hw = &pll->hw;

	ret = clk_hw_register(dev, hw);
	if (ret) {
		pr_err("failed to register pll %s %d\n", name, ret);
		kfree(pll);
		return ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(imx_dev_clk_hw_pll14xx);
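
/*
 * Usage sketch (illustrative, not part of this file): an SoC clock driver
 * would register a PLL roughly like
 *
 *	hw = imx_dev_clk_hw_pll14xx(dev, "audio_pll1", "audio_pll1_ref_sel",
 *				    anatop_base, &imx_1443x_pll);
 *
 * The clock names and the "anatop_base" mapping are placeholders here; the
 * real values come from the platform clock driver and its device tree.
 */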