// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RZ/V2H(P) Clock Pulse Generator
 *
 * Copyright (C) 2024 Renesas Electronics Corp.
 *
 * Based on rzg2l-cpg.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/refcount.h>
#include <linux/reset-controller.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "rzv2h-cpg.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

#define GET_CLK_ON_OFFSET(x)	(0x600 + ((x) * 4))
#define GET_CLK_MON_OFFSET(x)	(0x800 + ((x) * 4))
#define GET_RST_OFFSET(x)	(0x900 + ((x) * 4))
#define GET_RST_MON_OFFSET(x)	(0xA00 + ((x) * 4))

#define CPG_BUS_1_MSTOP		(0xd00)
#define CPG_BUS_MSTOP(m)	(CPG_BUS_1_MSTOP + ((m) - 1) * 4)

#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), (val)))
#define MDIV(val)		FIELD_GET(GENMASK(15, 6), (val))
#define PDIV(val)		FIELD_GET(GENMASK(5, 0), (val))
#define SDIV(val)		FIELD_GET(GENMASK(2, 0), (val))

#define DDIV_DIVCTL_WEN(shift)	BIT((shift) + 16)

#define GET_MOD_CLK_ID(base, index, bit)	\
		((base) + ((((index) * (16))) + (bit)))

#define CPG_CLKSTATUS0		(0x700)

/**
 * struct rzv2h_cpg_priv - Clock Pulse Generator Private Data
 *
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @resets: Array of resets
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @mstop_count: Array of mstop values
 * @rcdev: Reset controller entity
 */
struct rzv2h_cpg_priv {
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	struct rzv2h_reset *resets;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	atomic_t *mstop_count;

	struct reset_controller_dev rcdev;
};

#define rcdev_to_priv(x)	container_of(x, struct rzv2h_cpg_priv, rcdev)

struct pll_clk {
	struct rzv2h_cpg_priv *priv;
	void __iomem *base;
	struct clk_hw hw;
	unsigned int conf;
	unsigned int type;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)

/**
 * struct mod_clock - Module clock
 *
 * @priv: CPG private data
 * @mstop_data: mstop data relating to module clock
 * @hw: handle between common and hardware-specific interfaces
 * @no_pm: flag to indicate PM is not supported
 * @on_index: register offset
 * @on_bit: ON/MON bit
 * @mon_index: monitor register offset
 * @mon_bit: monitor bit
 */
struct mod_clock {
	struct rzv2h_cpg_priv *priv;
	unsigned int mstop_data;
	struct clk_hw hw;
	bool no_pm;
	u8 on_index;
	u8 on_bit;
	s8 mon_index;
	u8 mon_bit;
};

#define to_mod_clock(_hw)	container_of(_hw, struct mod_clock, hw)
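/*
 * Worked example of the numbering scheme above (illustrative values, not
 * taken from any datasheet): a module clock driven by bit 3 of CLK_ON
 * register 2 is controlled at offset GET_CLK_ON_OFFSET(2) = 0x608,
 * monitored at GET_CLK_MON_OFFSET(2) = 0x808, and occupies clks[] slot
 * GET_MOD_CLK_ID(num_core_clks, 2, 3) = num_core_clks + 2 * 16 + 3, i.e.
 * module clocks are packed 16 per CLK_ON register after the core clocks.
 */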
/**
 * struct ddiv_clk - DDIV clock
 *
 * @priv: CPG private data
 * @div: divider clk
 * @mon: monitor bit in CPG_CLKSTATUS0 register
 */
struct ddiv_clk {
	struct rzv2h_cpg_priv *priv;
	struct clk_divider div;
	u8 mon;
};

#define to_ddiv_clock(_div)	container_of(_div, struct ddiv_clk, div)

static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzv2h_cpg_priv *priv = pll_clk->priv;
	unsigned int clk1, clk2;
	u64 rate;

	if (!PLL_CLK_ACCESS(pll_clk->conf))
		return 0;

	clk1 = readl(priv->base + PLL_CLK1_OFFSET(pll_clk->conf));
	clk2 = readl(priv->base + PLL_CLK2_OFFSET(pll_clk->conf));

	/*
	 * rate = parent_rate * (MDIV + KDIV / 65536) / (PDIV * 2^SDIV);
	 * the multiply-and-shift computes the numerator with 16 fractional
	 * bits, then shifts out both the fraction and SDIV in one go.
	 */
	rate = mul_u64_u32_shr(parent_rate, (MDIV(clk1) << 16) + KDIV(clk1),
			       16 + SDIV(clk2));

	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(clk1));
}

static const struct clk_ops rzv2h_cpg_pll_ops = {
	.recalc_rate = rzv2h_cpg_pll_clk_recalc_rate,
};

static struct clk * __init
rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct rzv2h_cpg_priv *priv,
			   const struct clk_ops *ops)
{
	void __iomem *base = priv->base;
	struct device *dev = priv->dev;
	struct clk_init_data init;
	const struct clk *parent;
	const char *parent_name;
	struct pll_clk *pll_clk;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->conf = core->cfg.conf;
	pll_clk->base = base;
	pll_clk->priv = priv;
	pll_clk->type = core->type;

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret)
		return ERR_PTR(ret);

	return pll_clk->hw.clk;
}

static unsigned long rzv2h_ddiv_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int val;

	val = readl(divider->reg) >> divider->shift;
	val &= clk_div_mask(divider->width);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

static long rzv2h_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *prate)
{
	struct clk_divider *divider = to_clk_divider(hw);

	return divider_round_rate(hw, rate, prate, divider->table,
				  divider->width, divider->flags);
}

static int rzv2h_ddiv_determine_rate(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	struct clk_divider *divider = to_clk_divider(hw);

	return divider_determine_rate(hw, req, divider->table, divider->width,
				      divider->flags);
}

static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon)
{
	u32 bitmask = BIT(mon);
	u32 val;

	return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val, !(val & bitmask), 10, 200);
}
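/*
 * Note on the update protocol used by rzv2h_ddiv_set_rate() below (a sketch
 * of the hardware behaviour as reflected in this driver, not a datasheet
 * quote): a divider field only latches when its write-enable bit,
 * DDIV_DIVCTL_WEN(shift) = BIT(shift + 16), is set in the same write. The
 * corresponding CPG_CLKSTATUS0 monitor bit reads 1 while the divider is
 * still switching, so the write is bracketed by two polls: one to make sure
 * no earlier update is in flight, one to wait for this update to complete.
 */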
static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	struct ddiv_clk *ddiv = to_ddiv_clock(divider);
	struct rzv2h_cpg_priv *priv = ddiv->priv;
	unsigned long flags = 0;
	int value;
	u32 val;
	int ret;

	value = divider_get_val(rate, parent_rate, divider->table,
				divider->width, divider->flags);
	if (value < 0)
		return value;

	spin_lock_irqsave(divider->lock, flags);

	ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
	if (ret)
		goto ddiv_timeout;

	val = readl(divider->reg) | DDIV_DIVCTL_WEN(divider->shift);
	val &= ~(clk_div_mask(divider->width) << divider->shift);
	val |= (u32)value << divider->shift;
	writel(val, divider->reg);

	ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
	if (ret)
		goto ddiv_timeout;

	spin_unlock_irqrestore(divider->lock, flags);

	return 0;

ddiv_timeout:
	spin_unlock_irqrestore(divider->lock, flags);
	return ret;
}

static const struct clk_ops rzv2h_ddiv_clk_divider_ops = {
	.recalc_rate = rzv2h_ddiv_recalc_rate,
	.round_rate = rzv2h_ddiv_round_rate,
	.determine_rate = rzv2h_ddiv_determine_rate,
	.set_rate = rzv2h_ddiv_set_rate,
};

static struct clk * __init
rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
			    struct rzv2h_cpg_priv *priv)
{
	struct ddiv cfg_ddiv = core->cfg.ddiv;
	struct clk_init_data init = {};
	struct device *dev = priv->dev;
	u8 shift = cfg_ddiv.shift;
	u8 width = cfg_ddiv.width;
	const struct clk *parent;
	const char *parent_name;
	struct clk_divider *div;
	struct ddiv_clk *ddiv;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	if ((shift + width) > 16)
		return ERR_PTR(-EINVAL);

	ddiv = devm_kzalloc(priv->dev, sizeof(*ddiv), GFP_KERNEL);
	if (!ddiv)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	init.ops = &rzv2h_ddiv_clk_divider_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	ddiv->priv = priv;
	ddiv->mon = cfg_ddiv.monbit;
	div = &ddiv->div;
	div->reg = priv->base + cfg_ddiv.offset;
	div->shift = shift;
	div->width = width;
	div->flags = core->flag;
	div->lock = &priv->rmw_lock;
	div->hw.init = &init;
	div->table = core->dtable;

	ret = devm_clk_hw_register(dev, &div->hw);
	if (ret)
		return ERR_PTR(ret);

	return div->hw.clk;
}
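/*
 * Consumers reference this provider with two cells: a clock type and an
 * index. A hypothetical consumer node would look like (CPG_CORE and CPG_MOD
 * come from dt-bindings/clock/renesas-cpg-mssr.h):
 *
 *	clocks = <&cpg CPG_MOD 8>, <&cpg CPG_CORE 1>;
 *
 * rzv2h_cpg_clk_src_twocell_get() below decodes such specifiers.
 */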
static struct clk
*rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
			       void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct rzv2h_cpg_priv *priv = data;
	struct device *dev = priv->dev;
	const char *type;
	struct clk *clk;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + clkidx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}

static void __init
rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
			    struct rzv2h_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	unsigned int id = core->id, div = core->div;
	struct device *dev = priv->dev;
	const char *parent_name;
	struct clk_hw *clk_hw;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name,
							   parent_name, CLK_SET_RATE_PARENT,
							   core->mult, div);
		if (IS_ERR(clk_hw))
			clk = ERR_CAST(clk_hw);
		else
			clk = clk_hw->clk;
		break;
	case CLK_TYPE_PLL:
		clk = rzv2h_cpg_pll_clk_register(core, priv, &rzv2h_cpg_pll_ops);
		break;
	case CLK_TYPE_DDIV:
		clk = rzv2h_cpg_ddiv_clk_register(core, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register core clock %s: %ld\n",
		core->name, PTR_ERR(clk));
}

static void rzv2h_mod_clock_mstop_enable(struct rzv2h_cpg_priv *priv,
					 u32 mstop_data)
{
	unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
	u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
	atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
	unsigned long flags;
	unsigned int i;
	u32 val = 0;

	spin_lock_irqsave(&priv->rmw_lock, flags);
	for_each_set_bit(i, &mstop_mask, 16) {
		if (!atomic_read(&mstop[i]))
			val |= BIT(i) << 16;
		atomic_inc(&mstop[i]);
	}
	if (val)
		writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
	spin_unlock_irqrestore(&priv->rmw_lock, flags);
}

static void rzv2h_mod_clock_mstop_disable(struct rzv2h_cpg_priv *priv,
					  u32 mstop_data)
{
	unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
	u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
	atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
	unsigned long flags;
	unsigned int i;
	u32 val = 0;

	spin_lock_irqsave(&priv->rmw_lock, flags);
	for_each_set_bit(i, &mstop_mask, 16) {
		if (!atomic_read(&mstop[i]) ||
		    atomic_dec_and_test(&mstop[i]))
			val |= BIT(i) << 16 | BIT(i);
	}
	if (val)
		writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
	spin_unlock_irqrestore(&priv->rmw_lock, flags);
}

static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
{
	struct mod_clock *clock = to_mod_clock(hw);
	struct rzv2h_cpg_priv *priv = clock->priv;
	u32 bitmask;
	u32 offset;

	if (clock->mon_index >= 0) {
		offset = GET_CLK_MON_OFFSET(clock->mon_index);
		bitmask = BIT(clock->mon_bit);
	} else {
		offset = GET_CLK_ON_OFFSET(clock->on_index);
		bitmask = BIT(clock->on_bit);
	}

	return readl(priv->base + offset) & bitmask;
}
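/*
 * The CLK_ON registers use the same write-protect layout as the reset
 * registers: the upper 16 bits are one-shot write enables for the lower 16
 * bits. Writing "BIT(n) << 16" alone clears bit n (clock off), while
 * "BIT(n) << 16 | BIT(n)" sets it (clock on), so no read-modify-write is
 * needed; see rzv2h_mod_clock_endisable() below.
 */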
static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	bool enabled = rzv2h_mod_clock_is_enabled(hw);
	struct mod_clock *clock = to_mod_clock(hw);
	unsigned int reg = GET_CLK_ON_OFFSET(clock->on_index);
	struct rzv2h_cpg_priv *priv = clock->priv;
	u32 bitmask = BIT(clock->on_bit);
	struct device *dev = priv->dev;
	u32 value;
	int error;

	dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk,
		enable ? "ON" : "OFF");

	if (enabled == enable)
		return 0;

	value = bitmask << 16;
	if (enable) {
		value |= bitmask;
		writel(value, priv->base + reg);
		if (clock->mstop_data != BUS_MSTOP_NONE)
			rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data);
	} else {
		if (clock->mstop_data != BUS_MSTOP_NONE)
			rzv2h_mod_clock_mstop_disable(priv, clock->mstop_data);
		writel(value, priv->base + reg);
	}

	if (!enable || clock->mon_index < 0)
		return 0;

	reg = GET_CLK_MON_OFFSET(clock->mon_index);
	bitmask = BIT(clock->mon_bit);
	error = readl_poll_timeout_atomic(priv->base + reg, value,
					  value & bitmask, 0, 10);
	if (error)
		dev_err(dev, "Failed to enable CLK_ON 0x%x/%pC\n",
			GET_CLK_ON_OFFSET(clock->on_index), hw->clk);

	return error;
}

static int rzv2h_mod_clock_enable(struct clk_hw *hw)
{
	return rzv2h_mod_clock_endisable(hw, true);
}

static void rzv2h_mod_clock_disable(struct clk_hw *hw)
{
	rzv2h_mod_clock_endisable(hw, false);
}

static const struct clk_ops rzv2h_mod_clock_ops = {
	.enable = rzv2h_mod_clock_enable,
	.disable = rzv2h_mod_clock_disable,
	.is_enabled = rzv2h_mod_clock_is_enabled,
};
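/*
 * Ordering in rzv2h_mod_clock_endisable() above: on enable, the module clock
 * is started before its MSTOP (bus access stop) bits are released; on
 * disable, the MSTOP bits are asserted before the clock is stopped, so the
 * bus interface is never active without its clock. Each MSTOP bit is
 * refcounted in priv->mstop_count[], since a single MSTOP bit may be shared
 * by several module clocks.
 */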
static void __init
rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
			   struct rzv2h_cpg_priv *priv)
{
	struct mod_clock *clock = NULL;
	struct device *dev = priv->dev;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int id;
	int ret;

	id = GET_MOD_CLK_ID(priv->num_core_clks, mod->on_index, mod->on_bit);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzv2h_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	if (mod->critical)
		init.flags |= CLK_IS_CRITICAL;

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->on_index = mod->on_index;
	clock->on_bit = mod->on_bit;
	clock->mon_index = mod->mon_index;
	clock->mon_bit = mod->mon_bit;
	clock->no_pm = mod->no_pm;
	clock->priv = priv;
	clock->hw.init = &init;
	clock->mstop_data = mod->mstop_data;

	ret = devm_clk_hw_register(dev, &clock->hw);
	if (ret) {
		clk = ERR_PTR(ret);
		goto fail;
	}

	priv->clks[id] = clock->hw.clk;

	/*
	 * Ensure the module clocks and MSTOP bits are synchronized when they
	 * are turned ON by the bootloader. Enable MSTOP bits for module
	 * clocks that were turned ON in an earlier boot stage.
	 */
	if (clock->mstop_data != BUS_MSTOP_NONE &&
	    !mod->critical && rzv2h_mod_clock_is_enabled(&clock->hw)) {
		rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data);
	} else if (clock->mstop_data != BUS_MSTOP_NONE && mod->critical) {
		unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, clock->mstop_data);
		u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, clock->mstop_data);
		atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
		unsigned long flags;
		unsigned int i;
		u32 val = 0;

		/*
		 * Critical clocks are turned ON immediately upon registration,
		 * and the MSTOP counter is updated through the
		 * rzv2h_mod_clock_enable() path. However, if the critical
		 * clocks were already turned ON by the initial bootloader,
		 * synchronize the atomic counter here and clear the MSTOP bit.
		 */
		spin_lock_irqsave(&priv->rmw_lock, flags);
		for_each_set_bit(i, &mstop_mask, 16) {
			if (atomic_read(&mstop[i]))
				continue;
			val |= BIT(i) << 16;
			atomic_inc(&mstop[i]);
		}
		if (val)
			writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
	}

	return;

fail:
	dev_err(dev, "Failed to register module clock %s: %ld\n",
		mod->name, PTR_ERR(clk));
}

static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
	u32 mask = BIT(priv->resets[id].reset_bit);
	u8 monbit = priv->resets[id].mon_bit;
	u32 value = mask << 16;

	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, reg);

	writel(value, priv->base + reg);

	reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	mask = BIT(monbit);

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 value & mask, 10, 200);
}

static int rzv2h_cpg_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
	u32 mask = BIT(priv->resets[id].reset_bit);
	u8 monbit = priv->resets[id].mon_bit;
	u32 value = (mask << 16) | mask;

	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id, reg);

	writel(value, priv->base + reg);

	reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	mask = BIT(monbit);

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 !(value & mask), 10, 200);
}

static int rzv2h_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	int ret;

	ret = rzv2h_cpg_assert(rcdev, id);
	if (ret)
		return ret;

	return rzv2h_cpg_deassert(rcdev, id);
}

static int rzv2h_cpg_status(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	u8 monbit = priv->resets[id].mon_bit;

	return !!(readl(priv->base + reg) & BIT(monbit));
}

static const struct reset_control_ops rzv2h_cpg_reset_ops = {
	.reset = rzv2h_cpg_reset,
	.assert = rzv2h_cpg_assert,
	.deassert = rzv2h_cpg_deassert,
	.status = rzv2h_cpg_status,
};
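/*
 * Reset specifiers in DT are a flat identifier packing register index and
 * bit position: id = reset_index * 16 + reset_bit. For example (values for
 * illustration only), id 35 refers to bit 3 of reset register 2, i.e. the
 * register at GET_RST_OFFSET(2). rzv2h_cpg_reset_xlate() below searches
 * priv->resets[] for the matching entry.
 */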
static int rzv2h_cpg_reset_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int id = reset_spec->args[0];
	u8 rst_index = id / 16;
	u8 rst_bit = id % 16;
	unsigned int i;

	for (i = 0; i < rcdev->nr_resets; i++) {
		if (rst_index == priv->resets[i].reset_index &&
		    rst_bit == priv->resets[i].reset_bit)
			return i;
	}

	return -EINVAL;
}

static int rzv2h_cpg_reset_controller_register(struct rzv2h_cpg_priv *priv)
{
	priv->rcdev.ops = &rzv2h_cpg_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.dev = priv->dev;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = rzv2h_cpg_reset_xlate;
	priv->rcdev.nr_resets = priv->num_resets;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

/**
 * struct rzv2h_cpg_pd - RZ/V2H power domain data structure
 * @priv: pointer to CPG private data structure
 * @genpd: generic PM domain
 */
struct rzv2h_cpg_pd {
	struct rzv2h_cpg_priv *priv;
	struct generic_pm_domain genpd;
};

static bool rzv2h_cpg_is_pm_clk(struct rzv2h_cpg_pd *pd,
				const struct of_phandle_args *clkspec)
{
	if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
		return false;

	switch (clkspec->args[0]) {
	case CPG_MOD: {
		struct rzv2h_cpg_priv *priv = pd->priv;
		unsigned int id = clkspec->args[1];
		struct mod_clock *clock;

		if (id >= priv->num_mod_clks)
			return false;

		if (priv->clks[priv->num_core_clks + id] == ERR_PTR(-ENOENT))
			return false;

		clock = to_mod_clock(__clk_get_hw(priv->clks[priv->num_core_clks + id]));

		return !clock->no_pm;
	}

	case CPG_CORE:
	default:
		return false;
	}
}

static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	struct rzv2h_cpg_pd *pd = container_of(domain, struct rzv2h_cpg_pd, genpd);
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	unsigned int i;
	int error;

	for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
		if (!rzv2h_cpg_is_pm_clk(pd, &clkspec)) {
			of_node_put(clkspec.np);
			continue;
		}

		if (once) {
			once = false;
			error = pm_clk_create(dev);
			if (error) {
				of_node_put(clkspec.np);
				goto err;
			}
		}
		clk = of_clk_get_from_provider(&clkspec);
		of_node_put(clkspec.np);
		if (IS_ERR(clk)) {
			error = PTR_ERR(clk);
			goto fail_destroy;
		}

		error = pm_clk_add_clk(dev, clk);
		if (error) {
			dev_err(dev, "pm_clk_add_clk failed %d\n", error);
			goto fail_put;
		}
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}

static void rzv2h_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static void rzv2h_cpg_genpd_remove_simple(void *data)
{
	pm_genpd_remove(data);
}
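/*
 * The CPG registers a single always-on PM domain. GENPD_FLAG_PM_CLK makes
 * runtime PM of attached devices operate on the clocks collected by
 * rzv2h_cpg_attach_dev() above: the pm_clk core gates those module clocks on
 * runtime suspend and restores them on resume, while clocks marked no_pm are
 * skipped via rzv2h_cpg_is_pm_clk().
 */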
static int __init rzv2h_cpg_add_pm_domains(struct rzv2h_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzv2h_cpg_pd *pd;
	int ret;

	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->genpd.name = np->name;
	pd->priv = priv;
	pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
	pd->genpd.attach_dev = rzv2h_cpg_attach_dev;
	pd->genpd.detach_dev = rzv2h_cpg_detach_dev;
	ret = pm_genpd_init(&pd->genpd, &pm_domain_always_on_gov, false);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, rzv2h_cpg_genpd_remove_simple, &pd->genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, &pd->genpd);
}

static void rzv2h_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

static int __init rzv2h_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzv2h_cpg_info *info;
	struct rzv2h_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->rmw_lock);

	priv->dev = dev;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	priv->mstop_count = devm_kcalloc(dev, info->num_mstop_bits,
					 sizeof(*priv->mstop_count), GFP_KERNEL);
	if (!priv->mstop_count)
		return -ENOMEM;

	/* Adjust for CPG_BUS_m_MSTOP starting from m = 1 */
	priv->mstop_count -= 16;

	priv->resets = devm_kmemdup(dev, info->resets, sizeof(*info->resets) *
				    info->num_resets, GFP_KERNEL);
	if (!priv->resets)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->last_dt_core_clk = info->last_dt_core_clk;
	priv->num_resets = info->num_resets;

	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzv2h_cpg_register_core_clk(&info->core_clks[i], priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzv2h_cpg_register_mod_clk(&info->mod_clks[i], priv);

	error = of_clk_add_provider(np, rzv2h_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzv2h_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzv2h_cpg_add_pm_domains(priv);
	if (error)
		return error;

	error = rzv2h_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}

static const struct of_device_id rzv2h_cpg_match[] = {
#ifdef CONFIG_CLK_R9A09G057
	{
		.compatible = "renesas,r9a09g057-cpg",
		.data = &r9a09g057_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G047
	{
		.compatible = "renesas,r9a09g047-cpg",
		.data = &r9a09g047_cpg_info,
	},
#endif
	{ /* sentinel */ }
};

static struct platform_driver rzv2h_cpg_driver = {
	.driver = {
		.name = "rzv2h-cpg",
		.of_match_table = rzv2h_cpg_match,
	},
};

static int __init rzv2h_cpg_init(void)
{
	return platform_driver_probe(&rzv2h_cpg_driver, rzv2h_cpg_probe);
}

subsys_initcall(rzv2h_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/V2H CPG Driver");
MODULE_LICENSE("GPL");