// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 STMicroelectronics R&D Ltd
 */

/*
 * Authors:
 * Stephen Gallimore <stephen.gallimore@st.com>,
 * Pankaj Dev <pankaj.dev@st.com>.
 */

#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>

#include "clkgen.h"

/*
 * Maximum input clock to the PLL before we divide it down by 2,
 * although in practice this has never been seen to be used.
 */
#define QUADFS_NDIV_THRESHOLD 30000000

#define PLL_BW_GOODREF   (0L)
#define PLL_BW_VBADREF   (1L)
#define PLL_BW_BADREF    (2L)
#define PLL_BW_VGOODREF  (3L)

#define QUADFS_MAX_CHAN 4

struct stm_fs {
        unsigned long ndiv;
        unsigned long mdiv;
        unsigned long pe;
        unsigned long sdiv;
        unsigned long nsdiv;
};
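
/*
 * Note: struct stm_fs holds the raw register encodings consumed by the
 * fs660c32 helpers below, not natural values.  For example ndiv is stored
 * as (N - 16), sdiv selects a divide by 2^sdiv, and nsdiv = 0 selects the
 * extra divide by 3 (nsdiv = 1 bypasses it).
 */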

struct clkgen_quadfs_data {
        bool reset_present;
        bool bwfilter_present;
        bool lockstatus_present;
        bool powerup_polarity;
        bool standby_polarity;
        bool nsdiv_present;
        bool nrst_present;
        struct clkgen_field ndiv;
        struct clkgen_field ref_bw;
        struct clkgen_field nreset;
        struct clkgen_field npda;
        struct clkgen_field lock_status;

        struct clkgen_field nrst[QUADFS_MAX_CHAN];
        struct clkgen_field nsb[QUADFS_MAX_CHAN];
        struct clkgen_field en[QUADFS_MAX_CHAN];
        struct clkgen_field mdiv[QUADFS_MAX_CHAN];
        struct clkgen_field pe[QUADFS_MAX_CHAN];
        struct clkgen_field sdiv[QUADFS_MAX_CHAN];
        struct clkgen_field nsdiv[QUADFS_MAX_CHAN];

        const struct clk_ops *pll_ops;
        int (*get_params)(unsigned long, unsigned long, struct stm_fs *);
        int (*get_rate)(unsigned long, const struct stm_fs *,
                        unsigned long *);
};

struct clkgen_clk_out {
        const char *name;
        unsigned long flags;
};

struct clkgen_quadfs_data_clks {
        struct clkgen_quadfs_data *data;
        const struct clkgen_clk_out *outputs;
};

static const struct clk_ops st_quadfs_pll_c32_ops;

static int clk_fs660c32_dig_get_params(unsigned long input,
                unsigned long output, struct stm_fs *fs);
static int clk_fs660c32_dig_get_rate(unsigned long, const struct stm_fs *,
                unsigned long *);

static const struct clkgen_quadfs_data st_fs660c32_C = {
        .nrst_present = true,
        .nrst = { CLKGEN_FIELD(0x2f0, 0x1, 0),
                  CLKGEN_FIELD(0x2f0, 0x1, 1),
                  CLKGEN_FIELD(0x2f0, 0x1, 2),
                  CLKGEN_FIELD(0x2f0, 0x1, 3) },
        .npda = CLKGEN_FIELD(0x2f0, 0x1, 12),
        .nsb = { CLKGEN_FIELD(0x2f0, 0x1, 8),
                 CLKGEN_FIELD(0x2f0, 0x1, 9),
                 CLKGEN_FIELD(0x2f0, 0x1, 10),
                 CLKGEN_FIELD(0x2f0, 0x1, 11) },
        .nsdiv_present = true,
        .nsdiv = { CLKGEN_FIELD(0x304, 0x1, 24),
                   CLKGEN_FIELD(0x308, 0x1, 24),
                   CLKGEN_FIELD(0x30c, 0x1, 24),
                   CLKGEN_FIELD(0x310, 0x1, 24) },
        .mdiv = { CLKGEN_FIELD(0x304, 0x1f, 15),
                  CLKGEN_FIELD(0x308, 0x1f, 15),
                  CLKGEN_FIELD(0x30c, 0x1f, 15),
                  CLKGEN_FIELD(0x310, 0x1f, 15) },
        .en = { CLKGEN_FIELD(0x2fc, 0x1, 0),
                CLKGEN_FIELD(0x2fc, 0x1, 1),
                CLKGEN_FIELD(0x2fc, 0x1, 2),
                CLKGEN_FIELD(0x2fc, 0x1, 3) },
        .ndiv = CLKGEN_FIELD(0x2f4, 0x7, 16),
        .pe = { CLKGEN_FIELD(0x304, 0x7fff, 0),
                CLKGEN_FIELD(0x308, 0x7fff, 0),
                CLKGEN_FIELD(0x30c, 0x7fff, 0),
                CLKGEN_FIELD(0x310, 0x7fff, 0) },
        .sdiv = { CLKGEN_FIELD(0x304, 0xf, 20),
                  CLKGEN_FIELD(0x308, 0xf, 20),
                  CLKGEN_FIELD(0x30c, 0xf, 20),
                  CLKGEN_FIELD(0x310, 0xf, 20) },
        .lockstatus_present = true,
        .lock_status = CLKGEN_FIELD(0x2f0, 0x1, 24),
        .powerup_polarity = 1,
        .standby_polarity = 1,
        .pll_ops = &st_quadfs_pll_c32_ops,
        .get_params = clk_fs660c32_dig_get_params,
        .get_rate = clk_fs660c32_dig_get_rate,
};

static const struct clkgen_clk_out st_fs660c32_C_clks[] = {
        { .name = "clk-s-c0-fs0-ch0", },
        { .name = "clk-s-c0-fs0-ch1", },
        { .name = "clk-s-c0-fs0-ch2", },
        { .name = "clk-s-c0-fs0-ch3", },
};

static const struct clkgen_quadfs_data_clks st_fs660c32_C_data = {
        .data = (struct clkgen_quadfs_data *)&st_fs660c32_C,
        .outputs = st_fs660c32_C_clks,
};

static const struct clkgen_quadfs_data st_fs660c32_D = {
        .nrst_present = true,
        .nrst = { CLKGEN_FIELD(0x2a0, 0x1, 0),
                  CLKGEN_FIELD(0x2a0, 0x1, 1),
                  CLKGEN_FIELD(0x2a0, 0x1, 2),
                  CLKGEN_FIELD(0x2a0, 0x1, 3) },
        .ndiv = CLKGEN_FIELD(0x2a4, 0x7, 16),
        .pe = { CLKGEN_FIELD(0x2b4, 0x7fff, 0),
                CLKGEN_FIELD(0x2b8, 0x7fff, 0),
                CLKGEN_FIELD(0x2bc, 0x7fff, 0),
                CLKGEN_FIELD(0x2c0, 0x7fff, 0) },
        .sdiv = { CLKGEN_FIELD(0x2b4, 0xf, 20),
                  CLKGEN_FIELD(0x2b8, 0xf, 20),
                  CLKGEN_FIELD(0x2bc, 0xf, 20),
                  CLKGEN_FIELD(0x2c0, 0xf, 20) },
        .npda = CLKGEN_FIELD(0x2a0, 0x1, 12),
        .nsb = { CLKGEN_FIELD(0x2a0, 0x1, 8),
                 CLKGEN_FIELD(0x2a0, 0x1, 9),
                 CLKGEN_FIELD(0x2a0, 0x1, 10),
                 CLKGEN_FIELD(0x2a0, 0x1, 11) },
        .nsdiv_present = true,
        .nsdiv = { CLKGEN_FIELD(0x2b4, 0x1, 24),
                   CLKGEN_FIELD(0x2b8, 0x1, 24),
                   CLKGEN_FIELD(0x2bc, 0x1, 24),
                   CLKGEN_FIELD(0x2c0, 0x1, 24) },
        .mdiv = { CLKGEN_FIELD(0x2b4, 0x1f, 15),
                  CLKGEN_FIELD(0x2b8, 0x1f, 15),
                  CLKGEN_FIELD(0x2bc, 0x1f, 15),
                  CLKGEN_FIELD(0x2c0, 0x1f, 15) },
        .en = { CLKGEN_FIELD(0x2ac, 0x1, 0),
                CLKGEN_FIELD(0x2ac, 0x1, 1),
                CLKGEN_FIELD(0x2ac, 0x1, 2),
                CLKGEN_FIELD(0x2ac, 0x1, 3) },
        .lockstatus_present = true,
        .lock_status = CLKGEN_FIELD(0x2a0, 0x1, 24),
        .powerup_polarity = 1,
        .standby_polarity = 1,
        .pll_ops = &st_quadfs_pll_c32_ops,
        .get_params = clk_fs660c32_dig_get_params,
        .get_rate = clk_fs660c32_dig_get_rate,
};

static const struct clkgen_quadfs_data_clks st_fs660c32_D_data = {
        .data = (struct clkgen_quadfs_data *)&st_fs660c32_D,
};

static const struct clkgen_clk_out st_fs660c32_D0_clks[] = {
        { .name = "clk-s-d0-fs0-ch0", },
        { .name = "clk-s-d0-fs0-ch1", },
        { .name = "clk-s-d0-fs0-ch2", },
        { .name = "clk-s-d0-fs0-ch3", },
};

static const struct clkgen_quadfs_data_clks st_fs660c32_D0_data = {
        .data = (struct clkgen_quadfs_data *)&st_fs660c32_D,
        .outputs = st_fs660c32_D0_clks,
};

static const struct clkgen_clk_out st_fs660c32_D2_clks[] = {
        { .name = "clk-s-d2-fs0-ch0", },
        { .name = "clk-s-d2-fs0-ch1", },
        { .name = "clk-s-d2-fs0-ch2", },
        { .name = "clk-s-d2-fs0-ch3", },
};

static const struct clkgen_quadfs_data_clks st_fs660c32_D2_data = {
        .data = (struct clkgen_quadfs_data *)&st_fs660c32_D,
        .outputs = st_fs660c32_D2_clks,
};

static const struct clkgen_clk_out st_fs660c32_D3_clks[] = {
        { .name = "clk-s-d3-fs0-ch0", },
        { .name = "clk-s-d3-fs0-ch1", },
        { .name = "clk-s-d3-fs0-ch2", },
        { .name = "clk-s-d3-fs0-ch3", },
};

static const struct clkgen_quadfs_data_clks st_fs660c32_D3_data = {
        .data = (struct clkgen_quadfs_data *)&st_fs660c32_D,
        .outputs = st_fs660c32_D3_clks,
};

/**
 * DOC: A Frequency Synthesizer that multiplies its input clock by a fixed factor
 *
 * Traits of this clock:
 * prepare - clk_(un)prepare only ensures parent is (un)prepared
 * enable - clk_enable and clk_disable are functional & control the Fsyn
 * rate - inherits rate from parent. set_rate/round_rate/recalc_rate
 * parent - fixed parent. No clk_set_parent support
 */

/**
 * struct st_clk_quadfs_pll - A pll which outputs a fixed multiplier of
 *                            its parent clock, found inside a type of
 *                            ST quad channel frequency synthesizer block
 *
 * @hw: handle between common and hardware-specific interfaces.
 * @regs_base: base address of the configuration registers.
 * @lock: spinlock.
 * @data: local driver data.
 * @ndiv: cached ndiv value, programmed into the hardware on enable.
 */
struct st_clk_quadfs_pll {
        struct clk_hw hw;
        void __iomem *regs_base;
        spinlock_t *lock;
        struct clkgen_quadfs_data *data;
        u32 ndiv;
};

#define to_quadfs_pll(_hw) container_of(_hw, struct st_clk_quadfs_pll, hw)

static int quadfs_pll_enable(struct clk_hw *hw)
{
        struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
        unsigned long flags = 0, timeout = jiffies + msecs_to_jiffies(10);

        if (pll->lock)
                spin_lock_irqsave(pll->lock, flags);

        /*
         * Bring block out of reset if we have reset control.
         */
        if (pll->data->reset_present)
                CLKGEN_WRITE(pll, nreset, 1);

        /*
         * Use a fixed input clock noise bandwidth filter for the moment.
         */
        if (pll->data->bwfilter_present)
                CLKGEN_WRITE(pll, ref_bw, PLL_BW_GOODREF);

        CLKGEN_WRITE(pll, ndiv, pll->ndiv);

        /*
         * Power up the PLL.
         */
        CLKGEN_WRITE(pll, npda, !pll->data->powerup_polarity);

        if (pll->lock)
                spin_unlock_irqrestore(pll->lock, flags);

        if (pll->data->lockstatus_present)
                while (!CLKGEN_READ(pll, lock_status)) {
                        if (time_after(jiffies, timeout))
                                return -ETIMEDOUT;
                        cpu_relax();
                }

        return 0;
}

static void quadfs_pll_disable(struct clk_hw *hw)
{
        struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
        unsigned long flags = 0;

        if (pll->lock)
                spin_lock_irqsave(pll->lock, flags);

        /*
         * Powerdown the PLL and then put block into soft reset if we have
         * reset control.
         */
        CLKGEN_WRITE(pll, npda, pll->data->powerup_polarity);

        if (pll->data->reset_present)
                CLKGEN_WRITE(pll, nreset, 0);

        if (pll->lock)
                spin_unlock_irqrestore(pll->lock, flags);
}

static int quadfs_pll_is_enabled(struct clk_hw *hw)
{
        struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
        u32 npda = CLKGEN_READ(pll, npda);

        return pll->data->powerup_polarity ? !npda : !!npda;
}

static int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs,
                unsigned long *rate)
{
        unsigned long nd = fs->ndiv + 16; /* ndiv value */

        *rate = input * nd;

        return 0;
}

static unsigned long quadfs_pll_fs660c32_recalc_rate(struct clk_hw *hw,
                unsigned long parent_rate)
{
        struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
        unsigned long rate = 0;
        struct stm_fs params;

        params.ndiv = CLKGEN_READ(pll, ndiv);
        if (clk_fs660c32_vco_get_rate(parent_rate, &params, &rate))
                pr_err("%s:%s error calculating rate\n",
                       clk_hw_get_name(hw), __func__);

        pll->ndiv = params.ndiv;

        return rate;
}

static int clk_fs660c32_vco_get_params(unsigned long input,
                unsigned long output, struct stm_fs *fs)
{
        /*
         * Formula:
         *   VCO frequency = (fin x ndiv) / pdiv
         *   ndiv = VCOfreq * pdiv / fin
         */
        unsigned long pdiv = 1, n;

        /* Output clock range: 384 MHz to 660 MHz */
        if (output < 384000000 || output > 660000000)
                return -EINVAL;

        if (input > 40000000)
                /*
                 * This means that PDIV would be 2 instead of 1.
                 * Not supported today.
                 */
                return -EINVAL;

        input /= 1000;
        output /= 1000;

        n = output * pdiv / input;
        if (n < 16)
                n = 16;
        fs->ndiv = n - 16; /* Converting formula value to reg value */

        return 0;
}
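
/*
 * Illustrative example (values chosen for clarity, not taken from a
 * datasheet): for a 30 MHz input and a requested VCO rate of 600 MHz,
 * pdiv = 1 gives n = 600000 / 30000 = 20, which is programmed as
 * ndiv = 20 - 16 = 4.  clk_fs660c32_vco_get_rate() then recovers
 * 30 MHz * (4 + 16) = 600 MHz.
 */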

static int quadfs_pll_fs660c32_determine_rate(struct clk_hw *hw,
                struct clk_rate_request *req)
{
        struct stm_fs params;

        if (clk_fs660c32_vco_get_params(req->best_parent_rate, req->rate,
                                        &params))
                return 0;

        clk_fs660c32_vco_get_rate(req->best_parent_rate, &params, &req->rate);

        pr_debug("%s: %s new rate %ld [ndiv=%u]\n",
                 __func__, clk_hw_get_name(hw),
                 req->rate, (unsigned int)params.ndiv);

        return 0;
}

static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate,
                unsigned long parent_rate)
{
        struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
        struct stm_fs params;
        unsigned long hwrate = 0;
        unsigned long flags = 0;
        int ret;

        if (!rate || !parent_rate)
                return -EINVAL;

        ret = clk_fs660c32_vco_get_params(parent_rate, rate, &params);
        if (ret)
                return ret;

        clk_fs660c32_vco_get_rate(parent_rate, &params, &hwrate);

        pr_debug("%s: %s new rate %lu [ndiv=0x%x]\n",
                 __func__, clk_hw_get_name(hw),
                 hwrate, (unsigned int)params.ndiv);

        if (!hwrate)
                return -EINVAL;

        pll->ndiv = params.ndiv;

        if (pll->lock)
                spin_lock_irqsave(pll->lock, flags);

        CLKGEN_WRITE(pll, ndiv, pll->ndiv);

        if (pll->lock)
                spin_unlock_irqrestore(pll->lock, flags);

        return 0;
}

static const struct clk_ops st_quadfs_pll_c32_ops = {
        .enable = quadfs_pll_enable,
        .disable = quadfs_pll_disable,
        .is_enabled = quadfs_pll_is_enabled,
        .recalc_rate = quadfs_pll_fs660c32_recalc_rate,
        .determine_rate = quadfs_pll_fs660c32_determine_rate,
        .set_rate = quadfs_pll_fs660c32_set_rate,
};

static struct clk * __init st_clk_register_quadfs_pll(
                const char *name, const char *parent_name,
                struct clkgen_quadfs_data *quadfs, void __iomem *reg,
                spinlock_t *lock)
{
        struct st_clk_quadfs_pll *pll;
        struct clk *clk;
        struct clk_init_data init;

        /*
         * Sanity check required pointers.
         */
        if (WARN_ON(!name || !parent_name))
                return ERR_PTR(-EINVAL);

        pll = kzalloc(sizeof(*pll), GFP_KERNEL);
        if (!pll)
                return ERR_PTR(-ENOMEM);

        init.name = name;
        init.ops = quadfs->pll_ops;
        init.flags = CLK_GET_RATE_NOCACHE;
        init.parent_names = &parent_name;
        init.num_parents = 1;

        pll->data = quadfs;
        pll->regs_base = reg;
        pll->lock = lock;
        pll->hw.init = &init;

        clk = clk_register(NULL, &pll->hw);

        if (IS_ERR(clk))
                kfree(pll);

        return clk;
}

/**
 * DOC: A digital frequency synthesizer
 *
 * Traits of this clock:
 * prepare - clk_(un)prepare only ensures parent is (un)prepared
 * enable - clk_enable and clk_disable are functional
 * rate - set rate is functional
 * parent - fixed parent. No clk_set_parent support
 */

/*
 * struct st_clk_quadfs_fsynth - One clock output from a four channel digital
 *                               frequency synthesizer (fsynth) block.
 *
 * @hw: handle between common and hardware-specific interfaces
 *
 * @nsb: regmap field in the output control register for the digital
 *       standby of this fsynth channel. This control is active low so
 *       the channel is in standby when the control bit is cleared.
 *
 * @nsdiv: regmap field in the output control register for the optional
 *         divide by 3 of this fsynth channel. This control is active low
 *         so the divide by 3 is active when the control bit is cleared
 *         and the divide is bypassed when the bit is set.
 */
struct st_clk_quadfs_fsynth {
        struct clk_hw hw;
        void __iomem *regs_base;
        spinlock_t *lock;
        struct clkgen_quadfs_data *data;

        u32 chan;
        /*
         * Cached hardware values from set_rate so we can program the
         * hardware in enable. There are two reasons for this:
         *
         * 1. The registers may not be writable until the parent has been
         *    enabled.
         *
         * 2. It restores the clock rate when a driver does an enable
         *    on PM restore, after a suspend to RAM has lost the hardware
         *    setup.
         */
        u32 md;
        u32 pe;
        u32 sdiv;
        u32 nsdiv;
};

#define to_quadfs_fsynth(_hw) \
                        container_of(_hw, struct st_clk_quadfs_fsynth, hw)

static void quadfs_fsynth_program_enable(struct st_clk_quadfs_fsynth *fs)
{
        /*
         * Pulse the program enable register lsb to make the hardware take
         * notice of the new md/pe values with a glitchless transition.
         */
        CLKGEN_WRITE(fs, en[fs->chan], 1);
        CLKGEN_WRITE(fs, en[fs->chan], 0);
}

static void quadfs_fsynth_program_rate(struct st_clk_quadfs_fsynth *fs)
{
        unsigned long flags = 0;

        /*
         * Ensure the md/pe parameters are ignored while we are
         * reprogramming them so we can get a glitchless change
         * when fine tuning the speed of a running clock.
         */
        CLKGEN_WRITE(fs, en[fs->chan], 0);

        CLKGEN_WRITE(fs, mdiv[fs->chan], fs->md);
        CLKGEN_WRITE(fs, pe[fs->chan], fs->pe);
        CLKGEN_WRITE(fs, sdiv[fs->chan], fs->sdiv);

        if (fs->lock)
                spin_lock_irqsave(fs->lock, flags);

        if (fs->data->nsdiv_present)
                CLKGEN_WRITE(fs, nsdiv[fs->chan], fs->nsdiv);

        if (fs->lock)
                spin_unlock_irqrestore(fs->lock, flags);
}

static int quadfs_fsynth_enable(struct clk_hw *hw)
{
        struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
        unsigned long flags = 0;

        pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw));

        quadfs_fsynth_program_rate(fs);

        if (fs->lock)
                spin_lock_irqsave(fs->lock, flags);

        CLKGEN_WRITE(fs, nsb[fs->chan], !fs->data->standby_polarity);

        if (fs->data->nrst_present)
                CLKGEN_WRITE(fs, nrst[fs->chan], 0);

        if (fs->lock)
                spin_unlock_irqrestore(fs->lock, flags);

        quadfs_fsynth_program_enable(fs);

        return 0;
}

static void quadfs_fsynth_disable(struct clk_hw *hw)
{
        struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
        unsigned long flags = 0;

        pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw));

        if (fs->lock)
                spin_lock_irqsave(fs->lock, flags);

        CLKGEN_WRITE(fs, nsb[fs->chan], fs->data->standby_polarity);

        if (fs->lock)
                spin_unlock_irqrestore(fs->lock, flags);
}

static int quadfs_fsynth_is_enabled(struct clk_hw *hw)
{
        struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
        u32 nsb = CLKGEN_READ(fs, nsb[fs->chan]);

        pr_debug("%s: %s enable bit = 0x%x\n",
                 __func__, clk_hw_get_name(hw), nsb);

        return fs->data->standby_polarity ? !nsb : !!nsb;
}

#define P20 (uint64_t)(1 << 20)

static int clk_fs660c32_dig_get_rate(unsigned long input,
                const struct stm_fs *fs, unsigned long *rate)
{
        unsigned long s = (1 << fs->sdiv);
        unsigned long ns;
        uint64_t res;

        /*
         * 'nsdiv' is a register value ('BIN') which is translated
         * to a decimal value according to the following rules.
         *
         *     nsdiv      ns.dec
         *       0        3
         *       1        1
         */
        ns = (fs->nsdiv == 1) ? 1 : 3;

        res = (P20 * (32 + fs->mdiv) + 32 * fs->pe) * s * ns;
        *rate = (unsigned long)div64_u64(input * P20 * 32, res);

        return 0;
}
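
/*
 * Equivalent closed form: rate = input * 32 / ((32 + mdiv + pe / 2^15) *
 * 2^sdiv * ns).  Illustrative example (values chosen for clarity): with a
 * 600 MHz input, mdiv = 0, pe = 0, sdiv = 2 and nsdiv = 1 this gives
 * 600 MHz * 32 / (32 * 4 * 1) = 150 MHz; a non-zero pe interpolates between
 * successive mdiv settings in steps of 1/2^15.
 */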

static int clk_fs660c32_get_pe(int m, int si, unsigned long *deviation,
                signed long input, unsigned long output, uint64_t *p,
                struct stm_fs *fs)
{
        unsigned long new_freq, new_deviation;
        struct stm_fs fs_tmp;
        uint64_t val;

        val = (uint64_t)output << si;

        *p = (uint64_t)input * P20 - (32LL + (uint64_t)m) * val * (P20 / 32LL);

        *p = div64_u64(*p, val);

        if (*p > 32767LL)
                return 1;

        fs_tmp.mdiv = (unsigned long)m;
        fs_tmp.pe = (unsigned long)*p;
        fs_tmp.sdiv = si;
        fs_tmp.nsdiv = 1;

        clk_fs660c32_dig_get_rate(input, &fs_tmp, &new_freq);

        new_deviation = abs(output - new_freq);

        if (new_deviation < *deviation) {
                fs->mdiv = m;
                fs->pe = (unsigned long)*p;
                fs->sdiv = si;
                fs->nsdiv = 1;
                *deviation = new_deviation;
        }
        return 0;
}
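
/*
 * The pe value above follows from inverting the rate equation used in
 * clk_fs660c32_dig_get_rate() for nsdiv = 1, with val = output << si:
 *
 *   output << si = input * P20 * 32 / (P20 * (32 + m) + 32 * pe)
 *   =>  pe = (input * P20 - (32 + m) * val * P20 / 32) / val
 *
 * Results above 32767 do not fit in the 15-bit PE register field and are
 * rejected.
 */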

static int clk_fs660c32_dig_get_params(unsigned long input,
                unsigned long output, struct stm_fs *fs)
{
        int si;         /* sdiv_reg (8 downto 0) */
        int m;          /* md value */
        unsigned long new_freq, new_deviation;
        /* initial condition to say: "infinite deviation" */
        unsigned long deviation = ~0;
        uint64_t p, p1, p2;     /* pe value */
        int r1, r2;

        struct stm_fs fs_tmp;

        for (si = 0; (si <= 8) && deviation; si++) {

                /* Boundary test to avoid useless iteration */
                r1 = clk_fs660c32_get_pe(0, si, &deviation,
                                input, output, &p1, fs);
                r2 = clk_fs660c32_get_pe(31, si, &deviation,
                                input, output, &p2, fs);

                /* No solution */
                if (r1 && r2 && (p1 > p2))
                        continue;

                /* Try to find best deviation */
                for (m = 1; (m < 31) && deviation; m++)
                        clk_fs660c32_get_pe(m, si, &deviation,
                                        input, output, &p, fs);

        }

        if (deviation == ~0) /* No solution found */
                return -1;

        /* pe fine tuning if deviation not 0: +/- 2 around computed pe value */
        if (deviation) {
                fs_tmp.mdiv = fs->mdiv;
                fs_tmp.sdiv = fs->sdiv;
                fs_tmp.nsdiv = fs->nsdiv;

                if (fs->pe > 2)
                        p2 = fs->pe - 2;
                else
                        p2 = 0;

                for (; p2 < 32768ll && (p2 <= (fs->pe + 2)); p2++) {
                        fs_tmp.pe = (unsigned long)p2;

                        clk_fs660c32_dig_get_rate(input, &fs_tmp, &new_freq);

                        new_deviation = abs(output - new_freq);

                        /* Check if this is a better solution */
                        if (new_deviation < deviation) {
                                fs->pe = (unsigned long)p2;
                                deviation = new_deviation;
                        }
                }
        }
        return 0;
}
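
/*
 * Summary of the search above: for each sdiv setting the end points
 * mdiv = 0 and mdiv = 31 are evaluated first as a bound check, then
 * mdiv = 1..30 are scanned and the (mdiv, pe) pair with the smallest
 * deviation from the requested rate is kept.  If the match is still not
 * exact, pe is finally swept across +/- 2 of the computed value.
 */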

static int quadfs_fsynt_get_hw_value_for_recalc(struct st_clk_quadfs_fsynth *fs,
                struct stm_fs *params)
{
        /*
         * Get the initial hardware values for recalc_rate.
         */
        params->mdiv = CLKGEN_READ(fs, mdiv[fs->chan]);
        params->pe = CLKGEN_READ(fs, pe[fs->chan]);
        params->sdiv = CLKGEN_READ(fs, sdiv[fs->chan]);

        if (fs->data->nsdiv_present)
                params->nsdiv = CLKGEN_READ(fs, nsdiv[fs->chan]);
        else
                params->nsdiv = 1;

        /*
         * If all of the values are zero then assume no clock rate has been
         * programmed.
         */
        if (!params->mdiv && !params->pe && !params->sdiv)
                return 1;

        fs->md = params->mdiv;
        fs->pe = params->pe;
        fs->sdiv = params->sdiv;
        fs->nsdiv = params->nsdiv;

        return 0;
}

static long quadfs_find_best_rate(struct clk_hw *hw, unsigned long drate,
                unsigned long prate, struct stm_fs *params)
{
        struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
        int (*clk_fs_get_rate)(unsigned long,
                        const struct stm_fs *, unsigned long *);
        int (*clk_fs_get_params)(unsigned long, unsigned long, struct stm_fs *);
        unsigned long rate = 0;

        clk_fs_get_rate = fs->data->get_rate;
        clk_fs_get_params = fs->data->get_params;

        if (!clk_fs_get_params(prate, drate, params))
                clk_fs_get_rate(prate, params, &rate);

        return rate;
}
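
/*
 * quadfs_find_best_rate() returns 0 when get_params() cannot satisfy the
 * request; quadfs_set_rate() turns that into -EINVAL, while
 * quadfs_determine_rate() simply reports the 0 rate back to the core.
 */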

static unsigned long quadfs_recalc_rate(struct clk_hw *hw,
                unsigned long parent_rate)
{
        struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
        unsigned long rate = 0;
        struct stm_fs params;
        int (*clk_fs_get_rate)(unsigned long,
                        const struct stm_fs *, unsigned long *);

        clk_fs_get_rate = fs->data->get_rate;

        if (quadfs_fsynt_get_hw_value_for_recalc(fs, &params))
                return 0;

        if (clk_fs_get_rate(parent_rate, &params, &rate)) {
                pr_err("%s:%s error calculating rate\n",
                       clk_hw_get_name(hw), __func__);
        }

        pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);

        return rate;
}

static int quadfs_determine_rate(struct clk_hw *hw,
                struct clk_rate_request *req)
{
        struct stm_fs params;

        req->rate = quadfs_find_best_rate(hw, req->rate,
                        req->best_parent_rate, &params);

        pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n",
                 __func__, clk_hw_get_name(hw),
                 req->rate, (unsigned int)params.sdiv,
                 (unsigned int)params.mdiv,
                 (unsigned int)params.pe, (unsigned int)params.nsdiv);

        return 0;
}

static void quadfs_program_and_enable(struct st_clk_quadfs_fsynth *fs,
                struct stm_fs *params)
{
        fs->md = params->mdiv;
        fs->pe = params->pe;
        fs->sdiv = params->sdiv;
        fs->nsdiv = params->nsdiv;

        /*
         * In some integrations you can only change the fsynth programming when
         * the parent entity containing it is enabled.
         */
        quadfs_fsynth_program_rate(fs);
        quadfs_fsynth_program_enable(fs);
}

static int quadfs_set_rate(struct clk_hw *hw, unsigned long rate,
                unsigned long parent_rate)
{
        struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
        struct stm_fs params;
        long hwrate;

        if (!rate || !parent_rate)
                return -EINVAL;

        memset(&params, 0, sizeof(struct stm_fs));

        hwrate = quadfs_find_best_rate(hw, rate, parent_rate, &params);
        if (!hwrate)
                return -EINVAL;

        quadfs_program_and_enable(fs, &params);

        return 0;
}

static const struct clk_ops st_quadfs_ops = {
        .enable = quadfs_fsynth_enable,
        .disable = quadfs_fsynth_disable,
        .is_enabled = quadfs_fsynth_is_enabled,
        .determine_rate = quadfs_determine_rate,
        .set_rate = quadfs_set_rate,
        .recalc_rate = quadfs_recalc_rate,
};

static struct clk * __init st_clk_register_quadfs_fsynth(
                const char *name, const char *parent_name,
                struct clkgen_quadfs_data *quadfs, void __iomem *reg, u32 chan,
                unsigned long flags, spinlock_t *lock)
{
        struct st_clk_quadfs_fsynth *fs;
        struct clk *clk;
        struct clk_init_data init;

        /*
         * Sanity check required pointers, note that nsdiv3 is optional.
         */
        if (WARN_ON(!name || !parent_name))
                return ERR_PTR(-EINVAL);

        fs = kzalloc(sizeof(*fs), GFP_KERNEL);
        if (!fs)
                return ERR_PTR(-ENOMEM);

        init.name = name;
        init.ops = &st_quadfs_ops;
        init.flags = flags | CLK_GET_RATE_NOCACHE;
        init.parent_names = &parent_name;
        init.num_parents = 1;

        fs->data = quadfs;
        fs->regs_base = reg;
        fs->chan = chan;
        fs->lock = lock;
        fs->hw.init = &init;

        clk = clk_register(NULL, &fs->hw);

        if (IS_ERR(clk))
                kfree(fs);

        return clk;
}

static void __init st_of_create_quadfs_fsynths(
                struct device_node *np, const char *pll_name,
                struct clkgen_quadfs_data_clks *quadfs, void __iomem *reg,
                spinlock_t *lock)
{
        struct clk_onecell_data *clk_data;
        int fschan;

        clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
        if (!clk_data)
                return;

        clk_data->clk_num = QUADFS_MAX_CHAN;
        clk_data->clks = kcalloc(QUADFS_MAX_CHAN, sizeof(struct clk *),
                                 GFP_KERNEL);

        if (!clk_data->clks) {
                kfree(clk_data);
                return;
        }

        for (fschan = 0; fschan < QUADFS_MAX_CHAN; fschan++) {
                struct clk *clk;
                const char *clk_name;
                unsigned long flags = 0;

                if (quadfs->outputs) {
                        clk_name = quadfs->outputs[fschan].name;
                        flags = quadfs->outputs[fschan].flags;
                } else {
                        if (of_property_read_string_index(np,
                                                          "clock-output-names",
                                                          fschan, &clk_name))
                                break;
                        of_clk_detect_critical(np, fschan, &flags);
                }

                /*
                 * If we read an empty clock name then the channel is unused.
                 */
                if (*clk_name == '\0')
                        continue;

                clk = st_clk_register_quadfs_fsynth(clk_name, pll_name,
                                                    quadfs->data, reg, fschan,
                                                    flags, lock);

                /*
                 * If there was an error registering this clock output, clean
                 * up and move on to the next one.
                 */
                if (!IS_ERR(clk)) {
                        clk_data->clks[fschan] = clk;
                        pr_debug("%s: parent %s rate %u\n",
                                 __clk_get_name(clk),
                                 __clk_get_name(clk_get_parent(clk)),
                                 (unsigned int)clk_get_rate(clk));
                }
        }

        of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
}
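
/*
 * When no chip-specific output table is provided (quadfs->outputs is NULL),
 * the channel names are taken from the "clock-output-names" DT property and
 * critical channels are flagged via of_clk_detect_critical().
 */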

static void __init st_of_quadfs_setup(struct device_node *np,
                struct clkgen_quadfs_data_clks *datac)
{
        struct clk *clk;
        const char *pll_name, *clk_parent_name;
        void __iomem *reg;
        spinlock_t *lock;
        struct device_node *parent_np;

        /*
         * First check for a reg property within the node to keep backward
         * compatibility; if reg doesn't exist, look at the parent node.
         */
        reg = of_iomap(np, 0);
        if (!reg) {
                parent_np = of_get_parent(np);
                reg = of_iomap(parent_np, 0);
                of_node_put(parent_np);
                if (!reg) {
                        pr_err("%s: Failed to get base address\n", __func__);
                        return;
                }
        }

        clk_parent_name = of_clk_get_parent_name(np, 0);
        if (!clk_parent_name)
                return;

        pll_name = kasprintf(GFP_KERNEL, "%pOFn.pll", np);
        if (!pll_name)
                return;

        lock = kzalloc(sizeof(*lock), GFP_KERNEL);
        if (!lock)
                goto err_exit;

        spin_lock_init(lock);

        clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, datac->data,
                                         reg, lock);
        if (IS_ERR(clk)) {
                kfree(lock);
                goto err_exit;
        } else
                pr_debug("%s: parent %s rate %u\n",
                         __clk_get_name(clk),
                         __clk_get_name(clk_get_parent(clk)),
                         (unsigned int)clk_get_rate(clk));

        st_of_create_quadfs_fsynths(np, pll_name, datac, reg, lock);

err_exit:
        kfree(pll_name); /* No longer need local copy of the PLL name */
}

static void __init st_of_quadfs660C_setup(struct device_node *np)
{
        st_of_quadfs_setup(np,
                (struct clkgen_quadfs_data_clks *) &st_fs660c32_C_data);
}
CLK_OF_DECLARE(quadfs660C, "st,quadfs-pll", st_of_quadfs660C_setup);

static void __init st_of_quadfs660D_setup(struct device_node *np)
{
        st_of_quadfs_setup(np,
                (struct clkgen_quadfs_data_clks *) &st_fs660c32_D_data);
}
CLK_OF_DECLARE(quadfs660D, "st,quadfs", st_of_quadfs660D_setup);

static void __init st_of_quadfs660D0_setup(struct device_node *np)
{
        st_of_quadfs_setup(np,
                (struct clkgen_quadfs_data_clks *) &st_fs660c32_D0_data);
}
CLK_OF_DECLARE(quadfs660D0, "st,quadfs-d0", st_of_quadfs660D0_setup);

static void __init st_of_quadfs660D2_setup(struct device_node *np)
{
        st_of_quadfs_setup(np,
                (struct clkgen_quadfs_data_clks *) &st_fs660c32_D2_data);
}
CLK_OF_DECLARE(quadfs660D2, "st,quadfs-d2", st_of_quadfs660D2_setup);

static void __init st_of_quadfs660D3_setup(struct device_node *np)
{
        st_of_quadfs_setup(np,
                (struct clkgen_quadfs_data_clks *) &st_fs660c32_D3_data);
}
CLK_OF_DECLARE(quadfs660D3, "st,quadfs-d3", st_of_quadfs660D3_setup);
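
/*
 * Illustrative device tree usage (a sketch only; the node label, unit
 * address and parent clock name below are examples, the exact layout
 * depends on the SoC dtsi):
 *
 *      clockgen_d0: clock-controller@9104000 {
 *              compatible = "st,quadfs-d0";
 *              reg = <0x9104000 0x1000>;
 *              #clock-cells = <1>;
 *              clocks = <&clk_sysin>;
 *      };
 *
 * Consumers then reference one of the four channels by index, e.g.
 * clocks = <&clockgen_d0 2> selects "clk-s-d0-fs0-ch2".
 */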