// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 SiFive, Inc.
 * Copyright (C) 2020 Zong Li
 */

#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include "sifive-prci.h"
#include "fu540-prci.h"
#include "fu740-prci.h"

/*
 * Private functions
 */

/**
 * __prci_readl() - read from a PRCI register
 * @pd: PRCI context
 * @offs: register offset to read from (in bytes, from PRCI base address)
 *
 * Read the register located at offset @offs from the base virtual
 * address of the PRCI register target described by @pd, and return
 * the value to the caller.
 *
 * Context: Any context.
 *
 * Return: the contents of the register described by @pd and @offs.
 */
static u32 __prci_readl(struct __prci_data *pd, u32 offs)
{
	return readl_relaxed(pd->va + offs);
}

/**
 * __prci_writel() - write to a PRCI register
 * @v: value to write
 * @offs: register offset to write to (in bytes, from PRCI base address)
 * @pd: PRCI context
 *
 * Write @v to the register located at offset @offs from the base virtual
 * address of the PRCI register target described by @pd.
 *
 * Context: Any context.
 */
static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
{
	writel_relaxed(v, pd->va + offs);
}

/* WRPLL-related private functions */

/**
 * __prci_wrpll_unpack() - unpack WRPLL configuration registers into parameters
 * @c: ptr to a struct wrpll_cfg record to write config into
 * @r: value read from the PRCI PLL configuration register
 *
 * Given a value @r read from an FU740 PRCI PLL configuration register,
 * split it into fields and populate the WRPLL configuration record
 * pointed to by @c.
 *
 * The COREPLLCFG0 macros are used below, but the other *PLLCFG0 macros
 * have the same register layout.
 *
 * Context: Any context.
 */
static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r)
{
	u32 v;

	v = r & PRCI_COREPLLCFG0_DIVR_MASK;
	v >>= PRCI_COREPLLCFG0_DIVR_SHIFT;
	c->divr = v;

	v = r & PRCI_COREPLLCFG0_DIVF_MASK;
	v >>= PRCI_COREPLLCFG0_DIVF_SHIFT;
	c->divf = v;

	v = r & PRCI_COREPLLCFG0_DIVQ_MASK;
	v >>= PRCI_COREPLLCFG0_DIVQ_SHIFT;
	c->divq = v;

	v = r & PRCI_COREPLLCFG0_RANGE_MASK;
	v >>= PRCI_COREPLLCFG0_RANGE_SHIFT;
	c->range = v;

	c->flags &=
		(WRPLL_FLAGS_INT_FEEDBACK_MASK | WRPLL_FLAGS_EXT_FEEDBACK_MASK);

	/* external feedback mode not supported */
	c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;
}

/**
 * __prci_wrpll_pack() - pack PLL configuration parameters into a register value
 * @c: pointer to a struct wrpll_cfg record containing the PLL's cfg
 *
 * Using a set of WRPLL configuration values pointed to by @c,
 * assemble a PRCI PLL configuration register value, and return it to
 * the caller.
 *
 * Context: Any context.  Caller must ensure that the contents of the
 *          record pointed to by @c do not change during the execution
 *          of this function.
 *
 * Returns: a value suitable for writing into a PRCI PLL configuration
 *          register
 */
static u32 __prci_wrpll_pack(const struct wrpll_cfg *c)
{
	u32 r = 0;

	r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT;
	r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT;
	r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT;
	r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;

	/* external feedback mode not supported */
	r |= PRCI_COREPLLCFG0_FSE_MASK;

	return r;
}
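
/*
 * Illustrative sketch (not part of the driver): how the pack/unpack helpers
 * above combine with the analogbits WRPLL library routines used later in
 * this file.  "pd", "pwd", "parent_rate" and "rate" are assumed to be
 * provided by a caller, as in the clk_ops callbacks below.
 *
 *	struct wrpll_cfg c;
 *
 *	// current output rate of the PLL described by pwd
 *	__prci_wrpll_unpack(&c, __prci_readl(pd, pwd->cfg0_offs));
 *	rate = wrpll_calc_output_rate(&c, parent_rate);
 *
 *	// reprogram the PLL for a new target rate
 *	wrpll_configure_for_rate(&c, rate, parent_rate);
 *	__prci_writel(__prci_wrpll_pack(&c), pwd->cfg0_offs, pd);
 */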

/**
 * __prci_wrpll_read_cfg0() - read the WRPLL configuration from the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 *
 * Read the current configuration of the PLL identified by @pwd from
 * the PRCI identified by @pd, and store it into the local configuration
 * cache in @pwd.
 *
 * Context: Any context.  Caller must prevent the records pointed to by
 *          @pd and @pwd from changing during execution.
 */
static void __prci_wrpll_read_cfg0(struct __prci_data *pd,
				   struct __prci_wrpll_data *pwd)
{
	__prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
}

/**
 * __prci_wrpll_write_cfg0() - write WRPLL configuration into the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 * @c: WRPLL configuration record to write
 *
 * Write the WRPLL configuration described by @c into the WRPLL
 * configuration register identified by @pwd in the PRCI instance
 * described by @pd.  Make a cached copy of the WRPLL's current
 * configuration so it can be used by other code.
 *
 * Context: Any context.  Caller must prevent the records pointed to by
 *          @pd and @pwd from changing during execution.
 */
static void __prci_wrpll_write_cfg0(struct __prci_data *pd,
				    struct __prci_wrpll_data *pwd,
				    struct wrpll_cfg *c)
{
	__prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);

	memcpy(&pwd->c, c, sizeof(*c));
}

/**
 * __prci_wrpll_write_cfg1() - write the clock enable/disable configuration
 *                             into the PRCI
 * @pd: PRCI context
 * @pwd: PRCI WRPLL metadata
 * @enable: clock enable or disable value
 */
static void __prci_wrpll_write_cfg1(struct __prci_data *pd,
				    struct __prci_wrpll_data *pwd,
				    u32 enable)
{
	__prci_writel(enable, pwd->cfg1_offs, pd);
}

/*
 * Linux clock framework integration
 *
 * See the Linux clock framework documentation for more information on
 * these functions.
 */

unsigned long sifive_prci_wrpll_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;

	return wrpll_calc_output_rate(&pwd->c, parent_rate);
}

long sifive_prci_wrpll_round_rate(struct clk_hw *hw,
				  unsigned long rate,
				  unsigned long *parent_rate)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct wrpll_cfg c;

	memcpy(&c, &pwd->c, sizeof(c));

	wrpll_configure_for_rate(&c, rate, *parent_rate);

	return wrpll_calc_output_rate(&c, *parent_rate);
}

int sifive_prci_wrpll_set_rate(struct clk_hw *hw,
			       unsigned long rate, unsigned long parent_rate)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;
	int r;

	r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate);
	if (r)
		return r;

	if (pwd->enable_bypass)
		pwd->enable_bypass(pd);

	__prci_wrpll_write_cfg0(pd, pwd, &pwd->c);

	udelay(wrpll_calc_max_lock_us(&pwd->c));

	return 0;
}

int sifive_clk_is_enabled(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;
	u32 r;

	r = __prci_readl(pd, pwd->cfg1_offs);

	if (r & PRCI_COREPLLCFG1_CKE_MASK)
		return 1;
	else
		return 0;
}

int sifive_prci_clock_enable(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;

	if (sifive_clk_is_enabled(hw))
		return 0;

	__prci_wrpll_write_cfg1(pd, pwd, PRCI_COREPLLCFG1_CKE_MASK);

	if (pwd->disable_bypass)
		pwd->disable_bypass(pd);

	return 0;
}

void sifive_prci_clock_disable(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;
	u32 r;

	if (pwd->enable_bypass)
		pwd->enable_bypass(pd);

	r = __prci_readl(pd, pwd->cfg1_offs);
	r &= ~PRCI_COREPLLCFG1_CKE_MASK;

	__prci_wrpll_write_cfg1(pd, pwd, r);
}
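
/*
 * The WRPLL callbacks above are wired into the common clock framework by the
 * SoC-specific tables in fu540-prci.c / fu740-prci.c.  A minimal sketch of
 * such a wiring, assuming a WRPLL-backed clock (the struct name below is
 * illustrative, not the name used by those files):
 *
 *	static const struct clk_ops example_wrpll_clk_ops = {
 *		.set_rate	= sifive_prci_wrpll_set_rate,
 *		.round_rate	= sifive_prci_wrpll_round_rate,
 *		.recalc_rate	= sifive_prci_wrpll_recalc_rate,
 *		.enable		= sifive_prci_clock_enable,
 *		.disable	= sifive_prci_clock_disable,
 *		.is_enabled	= sifive_clk_is_enabled,
 *	};
 */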

/* TLCLKSEL clock integration */

unsigned long sifive_prci_tlclksel_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_data *pd = pc->pd;
	u32 v;
	u8 div;

	v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
	v &= PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK;
	div = v ? 1 : 2;

	return div_u64(parent_rate, div);
}

/* HFPCLK clock integration */

unsigned long sifive_prci_hfpclkplldiv_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_data *pd = pc->pd;
	u32 div = __prci_readl(pd, PRCI_HFPCLKPLLDIV_OFFSET);

	return div_u64(parent_rate, div + 2);
}
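
/*
 * Worked example for the two fixed dividers above (rates are illustrative):
 * if the TLCLKSEL status bit reads 1, tlclk runs at the parent rate,
 * otherwise at parent / 2; if the HFPCLKPLLDIV register reads N, the
 * resulting rate is parent / (N + 2), so N = 0 with a 1 GHz parent yields
 * 500 MHz.
 */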

/*
 * Core clock mux control
 */

/**
 * sifive_prci_coreclksel_use_hfclk() - switch the CORECLK mux to output HFCLK
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the HFCLK input source; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *          PRCI_CORECLKSEL_OFFSET register.
 */
void sifive_prci_coreclksel_use_hfclk(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r |= PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_coreclksel_use_corepll() - switch the CORECLK mux to output
 *                                        COREPLL
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the COREPLL output clock; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *          PRCI_CORECLKSEL_OFFSET register.
 */
void sifive_prci_coreclksel_use_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_coreclksel_use_final_corepll() - switch the CORECLK mux to
 *                                              output FINAL_COREPLL
 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
 *
 * Switch the CORECLK mux to the final COREPLL output clock; return once
 * complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *          PRCI_CORECLKSEL_OFFSET register.
 */
void sifive_prci_coreclksel_use_final_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_corepllsel_use_dvfscorepll() - switch the COREPLL mux to
 *                                            output DVFS_COREPLL
 * @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
 *
 * Switch the COREPLL mux to the DVFSCOREPLL output clock; return once
 * complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *          PRCI_COREPLLSEL_OFFSET register.
 */
void sifive_prci_corepllsel_use_dvfscorepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
	r |= PRCI_COREPLLSEL_COREPLLSEL_MASK;
	__prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_corepllsel_use_corepll() - switch the COREPLL mux to
 *                                        output COREPLL
 * @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
 *
 * Switch the COREPLL mux to the COREPLL output clock; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *          PRCI_COREPLLSEL_OFFSET register.
 */
void sifive_prci_corepllsel_use_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
	r &= ~PRCI_COREPLLSEL_COREPLLSEL_MASK;
	__prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
}
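
/*
 * The mux helpers in this file have the same signature as the
 * enable_bypass/disable_bypass hooks consumed by sifive_prci_wrpll_set_rate()
 * and sifive_prci_clock_enable().  A sketch of how a SoC-specific
 * __prci_wrpll_data record might plug them in (illustrative only; the real
 * per-PLL records live in fu540-prci.c / fu740-prci.c, and the register
 * offset fields are omitted here):
 *
 *	static struct __prci_wrpll_data example_corepll_data = {
 *		.enable_bypass	= sifive_prci_coreclksel_use_hfclk,
 *		.disable_bypass	= sifive_prci_coreclksel_use_corepll,
 *	};
 */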

/**
 * sifive_prci_hfpclkpllsel_use_hfclk() - switch the HFPCLKPLL mux to
 *                                        output HFCLK
 * @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
 *
 * Switch the HFPCLKPLL mux to the HFCLK input source; return once complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *          PRCI_HFPCLKPLLSEL_OFFSET register.
 */
void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
	r |= PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
	__prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
}

/**
 * sifive_prci_hfpclkpllsel_use_hfpclkpll() - switch the HFPCLKPLL mux to
 *                                            output HFPCLKPLL
 * @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
 *
 * Switch the HFPCLKPLL mux to the HFPCLKPLL output clock; return once
 * complete.
 *
 * Context: Any context.  Caller must prevent concurrent changes to the
 *          PRCI_HFPCLKPLLSEL_OFFSET register.
 */
void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
	r &= ~PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
	__prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
}

/* PCIE AUX clock APIs for enable, disable. */
int sifive_prci_pcie_aux_clock_is_enabled(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_data *pd = pc->pd;
	u32 r;

	r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET);

	if (r & PRCI_PCIE_AUX_EN_MASK)
		return 1;
	else
		return 0;
}

int sifive_prci_pcie_aux_clock_enable(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_data *pd = pc->pd;
	u32 r __maybe_unused;

	if (sifive_prci_pcie_aux_clock_is_enabled(hw))
		return 0;

	__prci_writel(1, PRCI_PCIE_AUX_OFFSET, pd);
	r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET); /* barrier */

	return 0;
}

void sifive_prci_pcie_aux_clock_disable(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_data *pd = pc->pd;
	u32 r __maybe_unused;

	__prci_writel(0, PRCI_PCIE_AUX_OFFSET, pd);
	r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET); /* barrier */
}

/**
 * __prci_register_clocks() - register clock controls in the PRCI
 * @dev: Linux struct device
 * @pd: pointer to the PRCI per-device instance data
 * @desc: pointer to the SoC-specific clock description
 *
 * Register the list of clock controls described in __prci_init_clocks[] with
 * the Linux clock framework.
 *
 * Return: 0 upon success or a negative error code upon failure.
 */
static int __prci_register_clocks(struct device *dev, struct __prci_data *pd,
				  const struct prci_clk_desc *desc)
{
	struct clk_init_data init = { };
	struct __prci_clock *pic;
	int parent_count, i, r;

	parent_count = of_clk_get_parent_count(dev->of_node);
	if (parent_count != EXPECTED_CLK_PARENT_COUNT) {
		dev_err(dev, "expected only two parent clocks, found %d\n",
			parent_count);
		return -EINVAL;
	}

	/* Register PLLs */
	for (i = 0; i < desc->num_clks; ++i) {
		pic = &(desc->clks[i]);

		init.name = pic->name;
		init.parent_names = &pic->parent_name;
		init.num_parents = 1;
		init.ops = pic->ops;
		pic->hw.init = &init;

		pic->pd = pd;

		if (pic->pwd)
			__prci_wrpll_read_cfg0(pd, pic->pwd);

		r = devm_clk_hw_register(dev, &pic->hw);
		if (r) {
			dev_warn(dev, "Failed to register clock %s: %d\n",
				 init.name, r);
			return r;
		}

		r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev));
		if (r) {
			dev_warn(dev, "Failed to register clkdev for %s: %d\n",
				 init.name, r);
			return r;
		}

		pd->hw_clks.hws[i] = &pic->hw;
	}

	pd->hw_clks.num = i;

	r = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					&pd->hw_clks);
	if (r) {
		dev_err(dev, "could not add hw_provider: %d\n", r);
		return r;
	}

	return 0;
}
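
/*
 * A sketch of the per-SoC clock description consumed by
 * __prci_register_clocks(), assuming a single WRPLL-backed clock.  The
 * names, the ops table and the wrpll data record are illustrative; the
 * real tables live in fu540-prci.c / fu740-prci.c.
 *
 *	static struct __prci_clock example_clks[] = {
 *		{
 *			.name		= "corepll",
 *			.parent_name	= "hfclk",
 *			.ops		= &example_wrpll_clk_ops,
 *			.pwd		= &example_corepll_data,
 *		},
 *	};
 *
 *	static const struct prci_clk_desc example_desc = {
 *		.clks		= example_clks,
 *		.num_clks	= ARRAY_SIZE(example_clks),
 *	};
 */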

/**
 * sifive_prci_probe() - initialize prci data and check parent count
 * @pdev: platform device pointer for the prci
 *
 * Return: 0 upon success or a negative error code upon failure.
 */
static int sifive_prci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct __prci_data *pd;
	const struct prci_clk_desc *desc;
	int r;

	desc = of_device_get_match_data(&pdev->dev);

	pd = devm_kzalloc(dev, struct_size(pd, hw_clks.hws, desc->num_clks), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->va = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pd->va))
		return PTR_ERR(pd->va);

	pd->reset.rcdev.owner = THIS_MODULE;
	pd->reset.rcdev.nr_resets = PRCI_RST_NR;
	pd->reset.rcdev.ops = &reset_simple_ops;
	pd->reset.rcdev.of_node = pdev->dev.of_node;
	pd->reset.active_low = true;
	pd->reset.membase = pd->va + PRCI_DEVICESRESETREG_OFFSET;
	spin_lock_init(&pd->reset.lock);

	r = devm_reset_controller_register(&pdev->dev, &pd->reset.rcdev);
	if (r) {
		dev_err(dev, "could not register reset controller: %d\n", r);
		return r;
	}

	r = __prci_register_clocks(dev, pd, desc);
	if (r) {
		dev_err(dev, "could not register clocks: %d\n", r);
		return r;
	}

	dev_dbg(dev, "SiFive PRCI probed\n");

	return 0;
}

static const struct of_device_id sifive_prci_of_match[] = {
	{.compatible = "sifive,fu540-c000-prci", .data = &prci_clk_fu540},
	{.compatible = "sifive,fu740-c000-prci", .data = &prci_clk_fu740},
	{}
};

static struct platform_driver sifive_prci_driver = {
	.driver = {
		.name = "sifive-clk-prci",
		.of_match_table = sifive_prci_of_match,
	},
	.probe = sifive_prci_probe,
};
module_platform_driver(sifive_prci_driver);

MODULE_AUTHOR("Paul Walmsley <paul.walmsley@sifive.com>");
MODULE_DESCRIPTION("SiFive Power Reset Clock Interface (PRCI) driver");
MODULE_LICENSE("GPL");