1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Renesas RZ/V2H(P) Clock Pulse Generator 4 * 5 * Copyright (C) 2024 Renesas Electronics Corp. 6 * 7 * Based on rzg2l-cpg.c 8 * 9 * Copyright (C) 2015 Glider bvba 10 * Copyright (C) 2013 Ideas On Board SPRL 11 * Copyright (C) 2015 Renesas Electronics Corp. 12 */ 13 14 #include <linux/bitfield.h> 15 #include <linux/clk.h> 16 #include <linux/clk-provider.h> 17 #include <linux/delay.h> 18 #include <linux/init.h> 19 #include <linux/iopoll.h> 20 #include <linux/mod_devicetable.h> 21 #include <linux/module.h> 22 #include <linux/of.h> 23 #include <linux/platform_device.h> 24 #include <linux/pm_clock.h> 25 #include <linux/pm_domain.h> 26 #include <linux/refcount.h> 27 #include <linux/reset-controller.h> 28 #include <linux/string_choices.h> 29 30 #include <dt-bindings/clock/renesas-cpg-mssr.h> 31 32 #include "rzv2h-cpg.h" 33 34 #ifdef DEBUG 35 #define WARN_DEBUG(x) WARN_ON(x) 36 #else 37 #define WARN_DEBUG(x) do { } while (0) 38 #endif 39 40 #define GET_CLK_ON_OFFSET(x) (0x600 + ((x) * 4)) 41 #define GET_CLK_MON_OFFSET(x) (0x800 + ((x) * 4)) 42 #define GET_RST_OFFSET(x) (0x900 + ((x) * 4)) 43 #define GET_RST_MON_OFFSET(x) (0xA00 + ((x) * 4)) 44 45 #define CPG_BUS_1_MSTOP (0xd00) 46 #define CPG_BUS_MSTOP(m) (CPG_BUS_1_MSTOP + ((m) - 1) * 4) 47 48 #define CPG_PLL_STBY(x) ((x)) 49 #define CPG_PLL_STBY_RESETB BIT(0) 50 #define CPG_PLL_STBY_RESETB_WEN BIT(16) 51 #define CPG_PLL_CLK1(x) ((x) + 0x004) 52 #define CPG_PLL_CLK1_KDIV(x) ((s16)FIELD_GET(GENMASK(31, 16), (x))) 53 #define CPG_PLL_CLK1_MDIV(x) FIELD_GET(GENMASK(15, 6), (x)) 54 #define CPG_PLL_CLK1_PDIV(x) FIELD_GET(GENMASK(5, 0), (x)) 55 #define CPG_PLL_CLK2(x) ((x) + 0x008) 56 #define CPG_PLL_CLK2_SDIV(x) FIELD_GET(GENMASK(2, 0), (x)) 57 #define CPG_PLL_MON(x) ((x) + 0x010) 58 #define CPG_PLL_MON_RESETB BIT(0) 59 #define CPG_PLL_MON_LOCK BIT(4) 60 61 #define DDIV_DIVCTL_WEN(shift) BIT((shift) + 16) 62 63 #define GET_MOD_CLK_ID(base, index, bit) \ 64 ((base) + ((((index) * (16))) + (bit))) 65 66 #define CPG_CLKSTATUS0 (0x700) 67 68 /** 69 * struct rzv2h_cpg_priv - Clock Pulse Generator Private Data 70 * 71 * @dev: CPG device 72 * @base: CPG register block base address 73 * @rmw_lock: protects register accesses 74 * @clks: Array containing all Core and Module Clocks 75 * @num_core_clks: Number of Core Clocks in clks[] 76 * @num_mod_clks: Number of Module Clocks in clks[] 77 * @resets: Array of resets 78 * @num_resets: Number of Module Resets in info->resets[] 79 * @last_dt_core_clk: ID of the last Core Clock exported to DT 80 * @ff_mod_status_ops: Fixed Factor Module Status Clock operations 81 * @mstop_count: Array of mstop values 82 * @rcdev: Reset controller entity 83 */ 84 struct rzv2h_cpg_priv { 85 struct device *dev; 86 void __iomem *base; 87 spinlock_t rmw_lock; 88 89 struct clk **clks; 90 unsigned int num_core_clks; 91 unsigned int num_mod_clks; 92 struct rzv2h_reset *resets; 93 unsigned int num_resets; 94 unsigned int last_dt_core_clk; 95 96 struct clk_ops *ff_mod_status_ops; 97 98 atomic_t *mstop_count; 99 100 struct reset_controller_dev rcdev; 101 }; 102 103 #define rcdev_to_priv(x) container_of(x, struct rzv2h_cpg_priv, rcdev) 104 105 struct pll_clk { 106 struct rzv2h_cpg_priv *priv; 107 struct clk_hw hw; 108 struct pll pll; 109 }; 110 111 #define to_pll(_hw) container_of(_hw, struct pll_clk, hw) 112 113 /** 114 * struct mod_clock - Module clock 115 * 116 * @priv: CPG private data 117 * @mstop_data: mstop data relating to module clock 118 * @hw: handle between common and hardware-specific interfaces 119 * @no_pm: flag to indicate PM is not supported 120 * @on_index: register offset 121 * @on_bit: ON/MON bit 122 * @mon_index: monitor register offset 123 * @mon_bit: monitor bit 124 * @ext_clk_mux_index: mux index for external clock source, or -1 if internal 125 */ 126 struct mod_clock { 127 struct rzv2h_cpg_priv *priv; 128 unsigned int mstop_data; 129 struct clk_hw hw; 130 bool no_pm; 131 u8 on_index; 132 u8 on_bit; 133 s8 mon_index; 134 u8 mon_bit; 135 s8 ext_clk_mux_index; 136 }; 137 138 #define to_mod_clock(_hw) container_of(_hw, struct mod_clock, hw) 139 140 /** 141 * struct ddiv_clk - DDIV clock 142 * 143 * @priv: CPG private data 144 * @div: divider clk 145 * @mon: monitor bit in CPG_CLKSTATUS0 register 146 */ 147 struct ddiv_clk { 148 struct rzv2h_cpg_priv *priv; 149 struct clk_divider div; 150 u8 mon; 151 }; 152 153 #define to_ddiv_clock(_div) container_of(_div, struct ddiv_clk, div) 154 155 /** 156 * struct rzv2h_ff_mod_status_clk - Fixed Factor Module Status Clock 157 * 158 * @priv: CPG private data 159 * @conf: fixed mod configuration 160 * @fix: fixed factor clock 161 */ 162 struct rzv2h_ff_mod_status_clk { 163 struct rzv2h_cpg_priv *priv; 164 struct fixed_mod_conf conf; 165 struct clk_fixed_factor fix; 166 }; 167 168 #define to_rzv2h_ff_mod_status_clk(_hw) \ 169 container_of(_hw, struct rzv2h_ff_mod_status_clk, fix.hw) 170 171 static int rzv2h_cpg_pll_clk_is_enabled(struct clk_hw *hw) 172 { 173 struct pll_clk *pll_clk = to_pll(hw); 174 struct rzv2h_cpg_priv *priv = pll_clk->priv; 175 u32 val = readl(priv->base + CPG_PLL_MON(pll_clk->pll.offset)); 176 177 /* Ensure both RESETB and LOCK bits are set */ 178 return (val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) == 179 (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK); 180 } 181 182 static int rzv2h_cpg_pll_clk_enable(struct clk_hw *hw) 183 { 184 struct pll_clk *pll_clk = to_pll(hw); 185 struct rzv2h_cpg_priv *priv = pll_clk->priv; 186 struct pll pll = pll_clk->pll; 187 u32 stby_offset; 188 u32 mon_offset; 189 u32 val; 190 int ret; 191 192 if (rzv2h_cpg_pll_clk_is_enabled(hw)) 193 return 0; 194 195 stby_offset = CPG_PLL_STBY(pll.offset); 196 mon_offset = CPG_PLL_MON(pll.offset); 197 198 writel(CPG_PLL_STBY_RESETB_WEN | CPG_PLL_STBY_RESETB, 199 priv->base + stby_offset); 200 201 /* 202 * Ensure PLL enters into normal mode 203 * 204 * Note: There is no HW information about the worst case latency. 205 * 206 * Since this latency might depend on external crystal or PLL rate, 207 * use a "super" safe timeout value. 208 */ 209 ret = readl_poll_timeout_atomic(priv->base + mon_offset, val, 210 (val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) == 211 (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK), 200, 2000); 212 if (ret) 213 dev_err(priv->dev, "Failed to enable PLL 0x%x/%pC\n", 214 stby_offset, hw->clk); 215 216 return ret; 217 } 218 219 static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw, 220 unsigned long parent_rate) 221 { 222 struct pll_clk *pll_clk = to_pll(hw); 223 struct rzv2h_cpg_priv *priv = pll_clk->priv; 224 struct pll pll = pll_clk->pll; 225 unsigned int clk1, clk2; 226 u64 rate; 227 228 if (!pll.has_clkn) 229 return 0; 230 231 clk1 = readl(priv->base + CPG_PLL_CLK1(pll.offset)); 232 clk2 = readl(priv->base + CPG_PLL_CLK2(pll.offset)); 233 234 rate = mul_u64_u32_shr(parent_rate, (CPG_PLL_CLK1_MDIV(clk1) << 16) + 235 CPG_PLL_CLK1_KDIV(clk1), 16 + CPG_PLL_CLK2_SDIV(clk2)); 236 237 return DIV_ROUND_CLOSEST_ULL(rate, CPG_PLL_CLK1_PDIV(clk1)); 238 } 239 240 static const struct clk_ops rzv2h_cpg_pll_ops = { 241 .is_enabled = rzv2h_cpg_pll_clk_is_enabled, 242 .enable = rzv2h_cpg_pll_clk_enable, 243 .recalc_rate = rzv2h_cpg_pll_clk_recalc_rate, 244 }; 245 246 static struct clk * __init 247 rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core, 248 struct rzv2h_cpg_priv *priv, 249 const struct clk_ops *ops) 250 { 251 struct device *dev = priv->dev; 252 struct clk_init_data init; 253 const struct clk *parent; 254 const char *parent_name; 255 struct pll_clk *pll_clk; 256 int ret; 257 258 parent = priv->clks[core->parent]; 259 if (IS_ERR(parent)) 260 return ERR_CAST(parent); 261 262 pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL); 263 if (!pll_clk) 264 return ERR_PTR(-ENOMEM); 265 266 parent_name = __clk_get_name(parent); 267 init.name = core->name; 268 init.ops = ops; 269 init.flags = 0; 270 init.parent_names = &parent_name; 271 init.num_parents = 1; 272 273 pll_clk->hw.init = &init; 274 pll_clk->pll = core->cfg.pll; 275 pll_clk->priv = priv; 276 277 ret = devm_clk_hw_register(dev, &pll_clk->hw); 278 if (ret) 279 return ERR_PTR(ret); 280 281 return pll_clk->hw.clk; 282 } 283 284 static unsigned long rzv2h_ddiv_recalc_rate(struct clk_hw *hw, 285 unsigned long parent_rate) 286 { 287 struct clk_divider *divider = to_clk_divider(hw); 288 unsigned int val; 289 290 val = readl(divider->reg) >> divider->shift; 291 val &= clk_div_mask(divider->width); 292 293 return divider_recalc_rate(hw, parent_rate, val, divider->table, 294 divider->flags, divider->width); 295 } 296 297 static long rzv2h_ddiv_round_rate(struct clk_hw *hw, unsigned long rate, 298 unsigned long *prate) 299 { 300 struct clk_divider *divider = to_clk_divider(hw); 301 302 return divider_round_rate(hw, rate, prate, divider->table, 303 divider->width, divider->flags); 304 } 305 306 static int rzv2h_ddiv_determine_rate(struct clk_hw *hw, 307 struct clk_rate_request *req) 308 { 309 struct clk_divider *divider = to_clk_divider(hw); 310 311 return divider_determine_rate(hw, req, divider->table, divider->width, 312 divider->flags); 313 } 314 315 static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon) 316 { 317 u32 bitmask = BIT(mon); 318 u32 val; 319 320 if (mon == CSDIV_NO_MON) 321 return 0; 322 323 return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val, !(val & bitmask), 10, 200); 324 } 325 326 static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate, 327 unsigned long parent_rate) 328 { 329 struct clk_divider *divider = to_clk_divider(hw); 330 struct ddiv_clk *ddiv = to_ddiv_clock(divider); 331 struct rzv2h_cpg_priv *priv = ddiv->priv; 332 unsigned long flags = 0; 333 int value; 334 u32 val; 335 int ret; 336 337 value = divider_get_val(rate, parent_rate, divider->table, 338 divider->width, divider->flags); 339 if (value < 0) 340 return value; 341 342 spin_lock_irqsave(divider->lock, flags); 343 344 ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon); 345 if (ret) 346 goto ddiv_timeout; 347 348 val = readl(divider->reg) | DDIV_DIVCTL_WEN(divider->shift); 349 val &= ~(clk_div_mask(divider->width) << divider->shift); 350 val |= (u32)value << divider->shift; 351 writel(val, divider->reg); 352 353 ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon); 354 355 ddiv_timeout: 356 spin_unlock_irqrestore(divider->lock, flags); 357 return ret; 358 } 359 360 static const struct clk_ops rzv2h_ddiv_clk_divider_ops = { 361 .recalc_rate = rzv2h_ddiv_recalc_rate, 362 .round_rate = rzv2h_ddiv_round_rate, 363 .determine_rate = rzv2h_ddiv_determine_rate, 364 .set_rate = rzv2h_ddiv_set_rate, 365 }; 366 367 static struct clk * __init 368 rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core, 369 struct rzv2h_cpg_priv *priv) 370 { 371 struct ddiv cfg_ddiv = core->cfg.ddiv; 372 struct clk_init_data init = {}; 373 struct device *dev = priv->dev; 374 u8 shift = cfg_ddiv.shift; 375 u8 width = cfg_ddiv.width; 376 const struct clk *parent; 377 const char *parent_name; 378 struct clk_divider *div; 379 struct ddiv_clk *ddiv; 380 int ret; 381 382 parent = priv->clks[core->parent]; 383 if (IS_ERR(parent)) 384 return ERR_CAST(parent); 385 386 parent_name = __clk_get_name(parent); 387 388 if ((shift + width) > 16) 389 return ERR_PTR(-EINVAL); 390 391 ddiv = devm_kzalloc(priv->dev, sizeof(*ddiv), GFP_KERNEL); 392 if (!ddiv) 393 return ERR_PTR(-ENOMEM); 394 395 init.name = core->name; 396 if (cfg_ddiv.no_rmw) 397 init.ops = &clk_divider_ops; 398 else 399 init.ops = &rzv2h_ddiv_clk_divider_ops; 400 init.parent_names = &parent_name; 401 init.num_parents = 1; 402 init.flags = CLK_SET_RATE_PARENT; 403 404 ddiv->priv = priv; 405 ddiv->mon = cfg_ddiv.monbit; 406 div = &ddiv->div; 407 div->reg = priv->base + cfg_ddiv.offset; 408 div->shift = shift; 409 div->width = width; 410 div->flags = core->flag; 411 div->lock = &priv->rmw_lock; 412 div->hw.init = &init; 413 div->table = core->dtable; 414 415 ret = devm_clk_hw_register(dev, &div->hw); 416 if (ret) 417 return ERR_PTR(ret); 418 419 return div->hw.clk; 420 } 421 422 static struct clk * __init 423 rzv2h_cpg_mux_clk_register(const struct cpg_core_clk *core, 424 struct rzv2h_cpg_priv *priv) 425 { 426 struct smuxed mux = core->cfg.smux; 427 const struct clk_hw *clk_hw; 428 429 clk_hw = devm_clk_hw_register_mux(priv->dev, core->name, 430 core->parent_names, core->num_parents, 431 core->flag, priv->base + mux.offset, 432 mux.shift, mux.width, 433 core->mux_flags, &priv->rmw_lock); 434 if (IS_ERR(clk_hw)) 435 return ERR_CAST(clk_hw); 436 437 return clk_hw->clk; 438 } 439 440 static int 441 rzv2h_clk_ff_mod_status_is_enabled(struct clk_hw *hw) 442 { 443 struct rzv2h_ff_mod_status_clk *fix = to_rzv2h_ff_mod_status_clk(hw); 444 struct rzv2h_cpg_priv *priv = fix->priv; 445 u32 offset = GET_CLK_MON_OFFSET(fix->conf.mon_index); 446 u32 bitmask = BIT(fix->conf.mon_bit); 447 u32 val; 448 449 val = readl(priv->base + offset); 450 return !!(val & bitmask); 451 } 452 453 static struct clk * __init 454 rzv2h_cpg_fixed_mod_status_clk_register(const struct cpg_core_clk *core, 455 struct rzv2h_cpg_priv *priv) 456 { 457 struct rzv2h_ff_mod_status_clk *clk_hw_data; 458 struct clk_init_data init = { }; 459 struct clk_fixed_factor *fix; 460 const struct clk *parent; 461 const char *parent_name; 462 int ret; 463 464 WARN_DEBUG(core->parent >= priv->num_core_clks); 465 parent = priv->clks[core->parent]; 466 if (IS_ERR(parent)) 467 return ERR_CAST(parent); 468 469 parent_name = __clk_get_name(parent); 470 parent = priv->clks[core->parent]; 471 if (IS_ERR(parent)) 472 return ERR_CAST(parent); 473 474 clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL); 475 if (!clk_hw_data) 476 return ERR_PTR(-ENOMEM); 477 478 clk_hw_data->priv = priv; 479 clk_hw_data->conf = core->cfg.fixed_mod; 480 481 init.name = core->name; 482 init.ops = priv->ff_mod_status_ops; 483 init.flags = CLK_SET_RATE_PARENT; 484 init.parent_names = &parent_name; 485 init.num_parents = 1; 486 487 fix = &clk_hw_data->fix; 488 fix->hw.init = &init; 489 fix->mult = core->mult; 490 fix->div = core->div; 491 492 ret = devm_clk_hw_register(priv->dev, &clk_hw_data->fix.hw); 493 if (ret) 494 return ERR_PTR(ret); 495 496 return clk_hw_data->fix.hw.clk; 497 } 498 499 static struct clk 500 *rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec, 501 void *data) 502 { 503 unsigned int clkidx = clkspec->args[1]; 504 struct rzv2h_cpg_priv *priv = data; 505 struct device *dev = priv->dev; 506 const char *type; 507 struct clk *clk; 508 509 switch (clkspec->args[0]) { 510 case CPG_CORE: 511 type = "core"; 512 if (clkidx > priv->last_dt_core_clk) { 513 dev_err(dev, "Invalid %s clock index %u\n", type, clkidx); 514 return ERR_PTR(-EINVAL); 515 } 516 clk = priv->clks[clkidx]; 517 break; 518 519 case CPG_MOD: 520 type = "module"; 521 if (clkidx >= priv->num_mod_clks) { 522 dev_err(dev, "Invalid %s clock index %u\n", type, clkidx); 523 return ERR_PTR(-EINVAL); 524 } 525 clk = priv->clks[priv->num_core_clks + clkidx]; 526 break; 527 528 default: 529 dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]); 530 return ERR_PTR(-EINVAL); 531 } 532 533 if (IS_ERR(clk)) 534 dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx, 535 PTR_ERR(clk)); 536 else 537 dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n", 538 clkspec->args[0], clkspec->args[1], clk, 539 clk_get_rate(clk)); 540 return clk; 541 } 542 543 static void __init 544 rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core, 545 struct rzv2h_cpg_priv *priv) 546 { 547 struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent; 548 unsigned int id = core->id, div = core->div; 549 struct device *dev = priv->dev; 550 const char *parent_name; 551 struct clk_hw *clk_hw; 552 553 WARN_DEBUG(id >= priv->num_core_clks); 554 WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT); 555 556 switch (core->type) { 557 case CLK_TYPE_IN: 558 clk = of_clk_get_by_name(priv->dev->of_node, core->name); 559 break; 560 case CLK_TYPE_FF: 561 WARN_DEBUG(core->parent >= priv->num_core_clks); 562 parent = priv->clks[core->parent]; 563 if (IS_ERR(parent)) { 564 clk = parent; 565 goto fail; 566 } 567 568 parent_name = __clk_get_name(parent); 569 clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name, 570 parent_name, CLK_SET_RATE_PARENT, 571 core->mult, div); 572 if (IS_ERR(clk_hw)) 573 clk = ERR_CAST(clk_hw); 574 else 575 clk = clk_hw->clk; 576 break; 577 case CLK_TYPE_FF_MOD_STATUS: 578 if (!priv->ff_mod_status_ops) { 579 priv->ff_mod_status_ops = 580 devm_kzalloc(dev, sizeof(*priv->ff_mod_status_ops), GFP_KERNEL); 581 if (!priv->ff_mod_status_ops) { 582 clk = ERR_PTR(-ENOMEM); 583 goto fail; 584 } 585 memcpy(priv->ff_mod_status_ops, &clk_fixed_factor_ops, 586 sizeof(const struct clk_ops)); 587 priv->ff_mod_status_ops->is_enabled = rzv2h_clk_ff_mod_status_is_enabled; 588 } 589 clk = rzv2h_cpg_fixed_mod_status_clk_register(core, priv); 590 break; 591 case CLK_TYPE_PLL: 592 clk = rzv2h_cpg_pll_clk_register(core, priv, &rzv2h_cpg_pll_ops); 593 break; 594 case CLK_TYPE_DDIV: 595 clk = rzv2h_cpg_ddiv_clk_register(core, priv); 596 break; 597 case CLK_TYPE_SMUX: 598 clk = rzv2h_cpg_mux_clk_register(core, priv); 599 break; 600 default: 601 goto fail; 602 } 603 604 if (IS_ERR_OR_NULL(clk)) 605 goto fail; 606 607 dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk)); 608 priv->clks[id] = clk; 609 return; 610 611 fail: 612 dev_err(dev, "Failed to register core clock %s: %ld\n", 613 core->name, PTR_ERR(clk)); 614 } 615 616 static void rzv2h_mod_clock_mstop_enable(struct rzv2h_cpg_priv *priv, 617 u32 mstop_data) 618 { 619 unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data); 620 u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data); 621 atomic_t *mstop = &priv->mstop_count[mstop_index * 16]; 622 unsigned long flags; 623 unsigned int i; 624 u32 val = 0; 625 626 spin_lock_irqsave(&priv->rmw_lock, flags); 627 for_each_set_bit(i, &mstop_mask, 16) { 628 if (!atomic_read(&mstop[i])) 629 val |= BIT(i) << 16; 630 atomic_inc(&mstop[i]); 631 } 632 if (val) 633 writel(val, priv->base + CPG_BUS_MSTOP(mstop_index)); 634 spin_unlock_irqrestore(&priv->rmw_lock, flags); 635 } 636 637 static void rzv2h_mod_clock_mstop_disable(struct rzv2h_cpg_priv *priv, 638 u32 mstop_data) 639 { 640 unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data); 641 u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data); 642 atomic_t *mstop = &priv->mstop_count[mstop_index * 16]; 643 unsigned long flags; 644 unsigned int i; 645 u32 val = 0; 646 647 spin_lock_irqsave(&priv->rmw_lock, flags); 648 for_each_set_bit(i, &mstop_mask, 16) { 649 if (!atomic_read(&mstop[i]) || 650 atomic_dec_and_test(&mstop[i])) 651 val |= BIT(i) << 16 | BIT(i); 652 } 653 if (val) 654 writel(val, priv->base + CPG_BUS_MSTOP(mstop_index)); 655 spin_unlock_irqrestore(&priv->rmw_lock, flags); 656 } 657 658 static int rzv2h_parent_clk_mux_to_index(struct clk_hw *hw) 659 { 660 struct clk_hw *parent_hw; 661 struct clk *parent_clk; 662 struct clk_mux *mux; 663 u32 val; 664 665 /* This will always succeed, so no need to check for IS_ERR() */ 666 parent_clk = clk_get_parent(hw->clk); 667 668 parent_hw = __clk_get_hw(parent_clk); 669 mux = to_clk_mux(parent_hw); 670 671 val = readl(mux->reg) >> mux->shift; 672 val &= mux->mask; 673 return clk_mux_val_to_index(parent_hw, mux->table, 0, val); 674 } 675 676 static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw) 677 { 678 struct mod_clock *clock = to_mod_clock(hw); 679 struct rzv2h_cpg_priv *priv = clock->priv; 680 int mon_index = clock->mon_index; 681 u32 bitmask; 682 u32 offset; 683 684 if (clock->ext_clk_mux_index >= 0 && 685 rzv2h_parent_clk_mux_to_index(hw) == clock->ext_clk_mux_index) 686 mon_index = -1; 687 688 if (mon_index >= 0) { 689 offset = GET_CLK_MON_OFFSET(mon_index); 690 bitmask = BIT(clock->mon_bit); 691 692 if (!(readl(priv->base + offset) & bitmask)) 693 return 0; 694 } 695 696 offset = GET_CLK_ON_OFFSET(clock->on_index); 697 bitmask = BIT(clock->on_bit); 698 699 return readl(priv->base + offset) & bitmask; 700 } 701 702 static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable) 703 { 704 bool enabled = rzv2h_mod_clock_is_enabled(hw); 705 struct mod_clock *clock = to_mod_clock(hw); 706 unsigned int reg = GET_CLK_ON_OFFSET(clock->on_index); 707 struct rzv2h_cpg_priv *priv = clock->priv; 708 u32 bitmask = BIT(clock->on_bit); 709 struct device *dev = priv->dev; 710 u32 value; 711 int error; 712 713 dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk, 714 str_on_off(enable)); 715 716 if (enabled == enable) 717 return 0; 718 719 value = bitmask << 16; 720 if (enable) { 721 value |= bitmask; 722 writel(value, priv->base + reg); 723 if (clock->mstop_data != BUS_MSTOP_NONE) 724 rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data); 725 } else { 726 if (clock->mstop_data != BUS_MSTOP_NONE) 727 rzv2h_mod_clock_mstop_disable(priv, clock->mstop_data); 728 writel(value, priv->base + reg); 729 } 730 731 if (!enable || clock->mon_index < 0) 732 return 0; 733 734 reg = GET_CLK_MON_OFFSET(clock->mon_index); 735 bitmask = BIT(clock->mon_bit); 736 error = readl_poll_timeout_atomic(priv->base + reg, value, 737 value & bitmask, 0, 10); 738 if (error) 739 dev_err(dev, "Failed to enable CLK_ON 0x%x/%pC\n", 740 GET_CLK_ON_OFFSET(clock->on_index), hw->clk); 741 742 return error; 743 } 744 745 static int rzv2h_mod_clock_enable(struct clk_hw *hw) 746 { 747 return rzv2h_mod_clock_endisable(hw, true); 748 } 749 750 static void rzv2h_mod_clock_disable(struct clk_hw *hw) 751 { 752 rzv2h_mod_clock_endisable(hw, false); 753 } 754 755 static const struct clk_ops rzv2h_mod_clock_ops = { 756 .enable = rzv2h_mod_clock_enable, 757 .disable = rzv2h_mod_clock_disable, 758 .is_enabled = rzv2h_mod_clock_is_enabled, 759 }; 760 761 static void __init 762 rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod, 763 struct rzv2h_cpg_priv *priv) 764 { 765 struct mod_clock *clock = NULL; 766 struct device *dev = priv->dev; 767 struct clk_init_data init; 768 struct clk *parent, *clk; 769 const char *parent_name; 770 unsigned int id; 771 int ret; 772 773 id = GET_MOD_CLK_ID(priv->num_core_clks, mod->on_index, mod->on_bit); 774 WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks); 775 WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks); 776 WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT); 777 778 parent = priv->clks[mod->parent]; 779 if (IS_ERR(parent)) { 780 clk = parent; 781 goto fail; 782 } 783 784 clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL); 785 if (!clock) { 786 clk = ERR_PTR(-ENOMEM); 787 goto fail; 788 } 789 790 init.name = mod->name; 791 init.ops = &rzv2h_mod_clock_ops; 792 init.flags = CLK_SET_RATE_PARENT; 793 if (mod->critical) 794 init.flags |= CLK_IS_CRITICAL; 795 796 parent_name = __clk_get_name(parent); 797 init.parent_names = &parent_name; 798 init.num_parents = 1; 799 800 clock->on_index = mod->on_index; 801 clock->on_bit = mod->on_bit; 802 clock->mon_index = mod->mon_index; 803 clock->mon_bit = mod->mon_bit; 804 clock->no_pm = mod->no_pm; 805 clock->ext_clk_mux_index = mod->ext_clk_mux_index; 806 clock->priv = priv; 807 clock->hw.init = &init; 808 clock->mstop_data = mod->mstop_data; 809 810 ret = devm_clk_hw_register(dev, &clock->hw); 811 if (ret) { 812 clk = ERR_PTR(ret); 813 goto fail; 814 } 815 816 priv->clks[id] = clock->hw.clk; 817 818 /* 819 * Ensure the module clocks and MSTOP bits are synchronized when they are 820 * turned ON by the bootloader. Enable MSTOP bits for module clocks that were 821 * turned ON in an earlier boot stage. 822 */ 823 if (clock->mstop_data != BUS_MSTOP_NONE && 824 !mod->critical && rzv2h_mod_clock_is_enabled(&clock->hw)) { 825 rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data); 826 } else if (clock->mstop_data != BUS_MSTOP_NONE && mod->critical) { 827 unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, clock->mstop_data); 828 u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, clock->mstop_data); 829 atomic_t *mstop = &priv->mstop_count[mstop_index * 16]; 830 unsigned long flags; 831 unsigned int i; 832 u32 val = 0; 833 834 /* 835 * Critical clocks are turned ON immediately upon registration, and the 836 * MSTOP counter is updated through the rzv2h_mod_clock_enable() path. 837 * However, if the critical clocks were already turned ON by the initial 838 * bootloader, synchronize the atomic counter here and clear the MSTOP bit. 839 */ 840 spin_lock_irqsave(&priv->rmw_lock, flags); 841 for_each_set_bit(i, &mstop_mask, 16) { 842 if (atomic_read(&mstop[i])) 843 continue; 844 val |= BIT(i) << 16; 845 atomic_inc(&mstop[i]); 846 } 847 if (val) 848 writel(val, priv->base + CPG_BUS_MSTOP(mstop_index)); 849 spin_unlock_irqrestore(&priv->rmw_lock, flags); 850 } 851 852 return; 853 854 fail: 855 dev_err(dev, "Failed to register module clock %s: %ld\n", 856 mod->name, PTR_ERR(clk)); 857 } 858 859 static int __rzv2h_cpg_assert(struct reset_controller_dev *rcdev, 860 unsigned long id, bool assert) 861 { 862 struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev); 863 unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index); 864 u32 mask = BIT(priv->resets[id].reset_bit); 865 u8 monbit = priv->resets[id].mon_bit; 866 u32 value = mask << 16; 867 868 dev_dbg(rcdev->dev, "%s id:%ld offset:0x%x\n", 869 assert ? "assert" : "deassert", id, reg); 870 871 if (!assert) 872 value |= mask; 873 writel(value, priv->base + reg); 874 875 reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index); 876 mask = BIT(monbit); 877 878 return readl_poll_timeout_atomic(priv->base + reg, value, 879 assert ? (value & mask) : !(value & mask), 880 10, 200); 881 } 882 883 static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev, 884 unsigned long id) 885 { 886 return __rzv2h_cpg_assert(rcdev, id, true); 887 } 888 889 static int rzv2h_cpg_deassert(struct reset_controller_dev *rcdev, 890 unsigned long id) 891 { 892 return __rzv2h_cpg_assert(rcdev, id, false); 893 } 894 895 static int rzv2h_cpg_reset(struct reset_controller_dev *rcdev, 896 unsigned long id) 897 { 898 int ret; 899 900 ret = rzv2h_cpg_assert(rcdev, id); 901 if (ret) 902 return ret; 903 904 return rzv2h_cpg_deassert(rcdev, id); 905 } 906 907 static int rzv2h_cpg_status(struct reset_controller_dev *rcdev, 908 unsigned long id) 909 { 910 struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev); 911 unsigned int reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index); 912 u8 monbit = priv->resets[id].mon_bit; 913 914 return !!(readl(priv->base + reg) & BIT(monbit)); 915 } 916 917 static const struct reset_control_ops rzv2h_cpg_reset_ops = { 918 .reset = rzv2h_cpg_reset, 919 .assert = rzv2h_cpg_assert, 920 .deassert = rzv2h_cpg_deassert, 921 .status = rzv2h_cpg_status, 922 }; 923 924 static int rzv2h_cpg_reset_xlate(struct reset_controller_dev *rcdev, 925 const struct of_phandle_args *reset_spec) 926 { 927 struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev); 928 unsigned int id = reset_spec->args[0]; 929 u8 rst_index = id / 16; 930 u8 rst_bit = id % 16; 931 unsigned int i; 932 933 for (i = 0; i < rcdev->nr_resets; i++) { 934 if (rst_index == priv->resets[i].reset_index && 935 rst_bit == priv->resets[i].reset_bit) 936 return i; 937 } 938 939 return -EINVAL; 940 } 941 942 static int rzv2h_cpg_reset_controller_register(struct rzv2h_cpg_priv *priv) 943 { 944 priv->rcdev.ops = &rzv2h_cpg_reset_ops; 945 priv->rcdev.of_node = priv->dev->of_node; 946 priv->rcdev.dev = priv->dev; 947 priv->rcdev.of_reset_n_cells = 1; 948 priv->rcdev.of_xlate = rzv2h_cpg_reset_xlate; 949 priv->rcdev.nr_resets = priv->num_resets; 950 951 return devm_reset_controller_register(priv->dev, &priv->rcdev); 952 } 953 954 /** 955 * struct rzv2h_cpg_pd - RZ/V2H power domain data structure 956 * @priv: pointer to CPG private data structure 957 * @genpd: generic PM domain 958 */ 959 struct rzv2h_cpg_pd { 960 struct rzv2h_cpg_priv *priv; 961 struct generic_pm_domain genpd; 962 }; 963 964 static bool rzv2h_cpg_is_pm_clk(struct rzv2h_cpg_pd *pd, 965 const struct of_phandle_args *clkspec) 966 { 967 if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2) 968 return false; 969 970 switch (clkspec->args[0]) { 971 case CPG_MOD: { 972 struct rzv2h_cpg_priv *priv = pd->priv; 973 unsigned int id = clkspec->args[1]; 974 struct mod_clock *clock; 975 976 if (id >= priv->num_mod_clks) 977 return false; 978 979 if (priv->clks[priv->num_core_clks + id] == ERR_PTR(-ENOENT)) 980 return false; 981 982 clock = to_mod_clock(__clk_get_hw(priv->clks[priv->num_core_clks + id])); 983 984 return !clock->no_pm; 985 } 986 987 case CPG_CORE: 988 default: 989 return false; 990 } 991 } 992 993 static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev) 994 { 995 struct rzv2h_cpg_pd *pd = container_of(domain, struct rzv2h_cpg_pd, genpd); 996 struct device_node *np = dev->of_node; 997 struct of_phandle_args clkspec; 998 bool once = true; 999 struct clk *clk; 1000 unsigned int i; 1001 int error; 1002 1003 for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) { 1004 if (!rzv2h_cpg_is_pm_clk(pd, &clkspec)) { 1005 of_node_put(clkspec.np); 1006 continue; 1007 } 1008 1009 if (once) { 1010 once = false; 1011 error = pm_clk_create(dev); 1012 if (error) { 1013 of_node_put(clkspec.np); 1014 goto err; 1015 } 1016 } 1017 clk = of_clk_get_from_provider(&clkspec); 1018 of_node_put(clkspec.np); 1019 if (IS_ERR(clk)) { 1020 error = PTR_ERR(clk); 1021 goto fail_destroy; 1022 } 1023 1024 error = pm_clk_add_clk(dev, clk); 1025 if (error) { 1026 dev_err(dev, "pm_clk_add_clk failed %d\n", 1027 error); 1028 goto fail_put; 1029 } 1030 } 1031 1032 return 0; 1033 1034 fail_put: 1035 clk_put(clk); 1036 1037 fail_destroy: 1038 pm_clk_destroy(dev); 1039 err: 1040 return error; 1041 } 1042 1043 static void rzv2h_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev) 1044 { 1045 if (!pm_clk_no_clocks(dev)) 1046 pm_clk_destroy(dev); 1047 } 1048 1049 static void rzv2h_cpg_genpd_remove_simple(void *data) 1050 { 1051 pm_genpd_remove(data); 1052 } 1053 1054 static int __init rzv2h_cpg_add_pm_domains(struct rzv2h_cpg_priv *priv) 1055 { 1056 struct device *dev = priv->dev; 1057 struct device_node *np = dev->of_node; 1058 struct rzv2h_cpg_pd *pd; 1059 int ret; 1060 1061 pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); 1062 if (!pd) 1063 return -ENOMEM; 1064 1065 pd->genpd.name = np->name; 1066 pd->priv = priv; 1067 pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP; 1068 pd->genpd.attach_dev = rzv2h_cpg_attach_dev; 1069 pd->genpd.detach_dev = rzv2h_cpg_detach_dev; 1070 ret = pm_genpd_init(&pd->genpd, &pm_domain_always_on_gov, false); 1071 if (ret) 1072 return ret; 1073 1074 ret = devm_add_action_or_reset(dev, rzv2h_cpg_genpd_remove_simple, &pd->genpd); 1075 if (ret) 1076 return ret; 1077 1078 return of_genpd_add_provider_simple(np, &pd->genpd); 1079 } 1080 1081 static void rzv2h_cpg_del_clk_provider(void *data) 1082 { 1083 of_clk_del_provider(data); 1084 } 1085 1086 static int __init rzv2h_cpg_probe(struct platform_device *pdev) 1087 { 1088 struct device *dev = &pdev->dev; 1089 struct device_node *np = dev->of_node; 1090 const struct rzv2h_cpg_info *info; 1091 struct rzv2h_cpg_priv *priv; 1092 unsigned int nclks, i; 1093 struct clk **clks; 1094 int error; 1095 1096 info = of_device_get_match_data(dev); 1097 1098 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 1099 if (!priv) 1100 return -ENOMEM; 1101 1102 spin_lock_init(&priv->rmw_lock); 1103 1104 priv->dev = dev; 1105 1106 priv->base = devm_platform_ioremap_resource(pdev, 0); 1107 if (IS_ERR(priv->base)) 1108 return PTR_ERR(priv->base); 1109 1110 nclks = info->num_total_core_clks + info->num_hw_mod_clks; 1111 clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL); 1112 if (!clks) 1113 return -ENOMEM; 1114 1115 priv->mstop_count = devm_kcalloc(dev, info->num_mstop_bits, 1116 sizeof(*priv->mstop_count), GFP_KERNEL); 1117 if (!priv->mstop_count) 1118 return -ENOMEM; 1119 1120 /* Adjust for CPG_BUS_m_MSTOP starting from m = 1 */ 1121 priv->mstop_count -= 16; 1122 1123 priv->resets = devm_kmemdup_array(dev, info->resets, info->num_resets, 1124 sizeof(*info->resets), GFP_KERNEL); 1125 if (!priv->resets) 1126 return -ENOMEM; 1127 1128 dev_set_drvdata(dev, priv); 1129 priv->clks = clks; 1130 priv->num_core_clks = info->num_total_core_clks; 1131 priv->num_mod_clks = info->num_hw_mod_clks; 1132 priv->last_dt_core_clk = info->last_dt_core_clk; 1133 priv->num_resets = info->num_resets; 1134 1135 for (i = 0; i < nclks; i++) 1136 clks[i] = ERR_PTR(-ENOENT); 1137 1138 for (i = 0; i < info->num_core_clks; i++) 1139 rzv2h_cpg_register_core_clk(&info->core_clks[i], priv); 1140 1141 for (i = 0; i < info->num_mod_clks; i++) 1142 rzv2h_cpg_register_mod_clk(&info->mod_clks[i], priv); 1143 1144 error = of_clk_add_provider(np, rzv2h_cpg_clk_src_twocell_get, priv); 1145 if (error) 1146 return error; 1147 1148 error = devm_add_action_or_reset(dev, rzv2h_cpg_del_clk_provider, np); 1149 if (error) 1150 return error; 1151 1152 error = rzv2h_cpg_add_pm_domains(priv); 1153 if (error) 1154 return error; 1155 1156 error = rzv2h_cpg_reset_controller_register(priv); 1157 if (error) 1158 return error; 1159 1160 return 0; 1161 } 1162 1163 static const struct of_device_id rzv2h_cpg_match[] = { 1164 #ifdef CONFIG_CLK_R9A09G047 1165 { 1166 .compatible = "renesas,r9a09g047-cpg", 1167 .data = &r9a09g047_cpg_info, 1168 }, 1169 #endif 1170 #ifdef CONFIG_CLK_R9A09G056 1171 { 1172 .compatible = "renesas,r9a09g056-cpg", 1173 .data = &r9a09g056_cpg_info, 1174 }, 1175 #endif 1176 #ifdef CONFIG_CLK_R9A09G057 1177 { 1178 .compatible = "renesas,r9a09g057-cpg", 1179 .data = &r9a09g057_cpg_info, 1180 }, 1181 #endif 1182 { /* sentinel */ } 1183 }; 1184 1185 static struct platform_driver rzv2h_cpg_driver = { 1186 .driver = { 1187 .name = "rzv2h-cpg", 1188 .of_match_table = rzv2h_cpg_match, 1189 }, 1190 }; 1191 1192 static int __init rzv2h_cpg_init(void) 1193 { 1194 return platform_driver_probe(&rzv2h_cpg_driver, rzv2h_cpg_probe); 1195 } 1196 1197 subsys_initcall(rzv2h_cpg_init); 1198 1199 MODULE_DESCRIPTION("Renesas RZ/V2H CPG Driver"); 1200