Lines matching "max-clk-rate-hz" (tokenized query: +full:max +full:-clk +full:-rate +full:-hz) in drivers/clk/renesas/rzg2l-cpg.c

1 // SPDX-License-Identifier: GPL-2.0
7 * Based on renesas-cpg-mssr.c
17 #include <linux/clk.h>
18 #include <linux/clk-provider.h>
19 #include <linux/clk/renesas.h>
31 #include <linux/reset-controller.h>
36 #include <dt-bindings/clock/renesas-cpg-mssr.h>
38 #include "rzg2l-cpg.h"
78 * struct clk_hw_data - clock hardware data
94 * struct sd_mux_hw_data - SD MUX clock hardware data
106 * struct div_hw_data - divider clock hardware data
109 * @invalid_rate: invalid rate for divider
110 * @max_rate: maximum rate for divider
139 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
148 * @num_resets: Number of Module Resets in info->resets[]
160 struct clk **clks;
181 u32 bitmask = GENMASK(GET_WIDTH(conf) - 1, 0) << GET_SHIFT(conf);
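/*
 * Context sketch (hypothetical reconstruction of the elided lines): this
 * bitmask belongs to a small polling helper that waits until the hardware
 * clears the field's "update in progress" bits, roughly:
 *
 *	static int rzg2l_cpg_wait_clk_update_done(void __iomem *base, u32 conf)
 *	{
 *		u32 bitmask = GENMASK(GET_WIDTH(conf) - 1, 0) << GET_SHIFT(conf);
 *		u32 off = GET_REG_OFFSET(conf);
 *		u32 val;
 *
 *		return readl_poll_timeout_atomic(base + off, val,
 *						 !(val & bitmask), 10, 200);
 *	}
 *
 * (poll interval/timeout values illustrative)
 */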
192 struct clk_hw *hw = __clk_get_hw(cnd->clk);
194 struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
195 u32 off = GET_REG_OFFSET(clk_hw_data->conf);
196 u32 shift = GET_SHIFT(clk_hw_data->conf);
201 if (event != PRE_RATE_CHANGE || (cnd->new_rate / MEGA == 266))
204 spin_lock_irqsave(&priv->rmw_lock, flags);
218 writel((CPG_WEN_BIT | clk_src_266) << shift, priv->base + off);
221 ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
223 spin_unlock_irqrestore(&priv->rmw_lock, flags);
226 dev_err(priv->dev, "failed to switch to safe clk source\n");
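/*
 * Rationale, as far as the fragment shows: for any rate change that is not
 * already targeting 266 MHz, the mux is first parked on the 266 MHz source
 * (clk_src_266) so the downstream IP never sees an out-of-spec frequency
 * while the new rate settles.
 */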
235 struct clk_hw *hw = __clk_get_hw(cnd->clk);
238 struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
239 u32 off = GET_REG_OFFSET(clk_hw_data->conf);
240 u32 shift = GET_SHIFT(clk_hw_data->conf);
245 if (event != PRE_RATE_CHANGE || !div_hw_data->invalid_rate ||
246 div_hw_data->invalid_rate % cnd->new_rate)
249 spin_lock_irqsave(&priv->rmw_lock, flags);
251 val = readl(priv->base + off);
253 val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);
257 * 1/ SD div cannot be 1 (val == 0) if parent rate is 800MHz
258 * 2/ OCTA / SPI div cannot be 1 (val == 0) if parent rate is 400MHz
260 * only one parent having 400MHz, we take the parent rate into account
265 writel((CPG_WEN_BIT | 1) << shift, priv->base + off);
267 ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
270 spin_unlock_irqrestore(&priv->rmw_lock, flags);
273 dev_err(priv->dev, "Failed to downgrade the div\n");
283 if (!core->notifier)
286 nb = devm_kzalloc(priv->dev, sizeof(*nb), GFP_KERNEL);
288 return -ENOMEM;
290 nb->notifier_call = core->notifier;
292 return clk_notifier_register(hw->clk, nb);
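/*
 * Usage sketch (hypothetical table entry): a core clock opts into this by
 * filling .notifier in its struct cpg_core_clk, e.g.
 *
 *	{ .name = "sd0", ..., .notifier = rzg2l_cpg_sd_clk_mux_notifier },
 *
 * after which the callback fires on every rate change of hw->clk.
 */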
300 struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
303 val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
304 val >>= GET_SHIFT(clk_hw_data->conf);
305 val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);
307 return divider_recalc_rate(hw, parent_rate, val, div_hw_data->dtable,
308 CLK_DIVIDER_ROUND_CLOSEST, div_hw_data->width);
316 if (div_hw_data->max_rate && req->rate > div_hw_data->max_rate)
317 req->rate = div_hw_data->max_rate;
319 return divider_determine_rate(hw, req, div_hw_data->dtable, div_hw_data->width,
323 static int rzg3s_div_clk_set_rate(struct clk_hw *hw, unsigned long rate,
328 struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
329 u32 off = GET_REG_OFFSET(clk_hw_data->conf);
330 u32 shift = GET_SHIFT(clk_hw_data->conf);
335 val = divider_get_val(rate, parent_rate, div_hw_data->dtable, div_hw_data->width,
338 spin_lock_irqsave(&priv->rmw_lock, flags);
339 writel((CPG_WEN_BIT | val) << shift, priv->base + off);
341 ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
342 spin_unlock_irqrestore(&priv->rmw_lock, flags);
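/*
 * Hypothetical divider table of the shape consumed by the ops above: .val
 * is the raw register field, .div the resulting divisor, and a zero .div
 * terminates the table (matching the "clkt->div" loop condition below).
 */
static const struct clk_div_table example_dtable[] = {
	{ .val = 0, .div = 1 },
	{ .val = 1, .div = 2 },
	{ .val = 2, .div = 4 },
	{ .val = 3, .div = 8 },
	{ /* sentinel */ }
};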
353 static struct clk * __init
360 const struct clk *parent;
362 u32 max = 0;
365 parent = priv->clks[core->parent];
371 div_hw_data = devm_kzalloc(priv->dev, sizeof(*div_hw_data), GFP_KERNEL);
373 return ERR_PTR(-ENOMEM);
375 init.name = core->name;
376 init.flags = core->flag;
382 for (clkt = core->dtable; clkt->div; clkt++) {
383 if (max < clkt->div)
384 max = clkt->div;
387 div_hw_data->hw_data.priv = priv;
388 div_hw_data->hw_data.conf = core->conf;
389 div_hw_data->hw_data.sconf = core->sconf;
390 div_hw_data->dtable = core->dtable;
391 div_hw_data->invalid_rate = core->invalid_rate;
392 div_hw_data->max_rate = core->max_rate;
393 div_hw_data->width = fls(max) - 1;
395 clk_hw = &div_hw_data->hw_data.hw;
396 clk_hw->init = &init;
398 ret = devm_clk_hw_register(priv->dev, clk_hw);
404 dev_err(priv->dev, "Failed to register notifier for %s\n",
405 core->name);
409 return clk_hw->clk;
412 static struct clk * __init
416 void __iomem *base = priv->base;
417 struct device *dev = priv->dev;
418 const struct clk *parent;
422 parent = priv->clks[core->parent];
428 if (core->dtable)
429 clk_hw = clk_hw_register_divider_table(dev, core->name,
431 base + GET_REG_OFFSET(core->conf),
432 GET_SHIFT(core->conf),
433 GET_WIDTH(core->conf),
434 core->flag,
435 core->dtable,
436 &priv->rmw_lock);
438 clk_hw = clk_hw_register_divider(dev, core->name,
440 base + GET_REG_OFFSET(core->conf),
441 GET_SHIFT(core->conf),
442 GET_WIDTH(core->conf),
443 core->flag, &priv->rmw_lock);
448 return clk_hw->clk;
451 static struct clk * __init
457 clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
458 core->parent_names, core->num_parents,
459 core->flag,
460 priv->base + GET_REG_OFFSET(core->conf),
461 GET_SHIFT(core->conf),
462 GET_WIDTH(core->conf),
463 core->mux_flags, &priv->rmw_lock);
467 return clk_hw->clk;
474 struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
475 u32 off = GET_REG_OFFSET(clk_hw_data->conf);
476 u32 shift = GET_SHIFT(clk_hw_data->conf);
481 val = clk_mux_index_to_val(sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, index);
483 spin_lock_irqsave(&priv->rmw_lock, flags);
485 writel((CPG_WEN_BIT | val) << shift, priv->base + off);
488 ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
490 spin_unlock_irqrestore(&priv->rmw_lock, flags);
493 dev_err(priv->dev, "Failed to switch parent\n");
502 struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
505 val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
506 val >>= GET_SHIFT(clk_hw_data->conf);
507 val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);
509 return clk_mux_val_to_index(hw, sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, val);
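/*
 * Hypothetical parent table of the shape used by the two ops above: entry i
 * holds the raw register value that selects parent i, as consumed by
 * clk_mux_index_to_val()/clk_mux_val_to_index().
 */
static const u32 example_mtable[] = { 1, 2, 3 };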
518 static struct clk * __init
527 sd_mux_hw_data = devm_kzalloc(priv->dev, sizeof(*sd_mux_hw_data), GFP_KERNEL);
529 return ERR_PTR(-ENOMEM);
531 sd_mux_hw_data->hw_data.priv = priv;
532 sd_mux_hw_data->hw_data.conf = core->conf;
533 sd_mux_hw_data->hw_data.sconf = core->sconf;
534 sd_mux_hw_data->mtable = core->mtable;
536 init.name = core->name;
538 init.flags = core->flag;
539 init.num_parents = core->num_parents;
540 init.parent_names = core->parent_names;
542 clk_hw = &sd_mux_hw_data->hw_data.hw;
543 clk_hw->init = &init;
545 ret = devm_clk_hw_register(priv->dev, clk_hw);
551 dev_err(priv->dev, "Failed to register notifier for %s\n",
552 core->name);
556 return clk_hw->clk;
561 unsigned long rate)
565 params->pl5_intin = rate / MEGA;
566 params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
567 params->pl5_refdiv = 2;
568 params->pl5_postdiv1 = 1;
569 params->pl5_postdiv2 = 1;
570 params->pl5_spread = 0x16;
573 (params->pl5_intin << 24) + params->pl5_fracin),
574 params->pl5_refdiv) >> 24;
576 params->pl5_postdiv1 * params->pl5_postdiv2);
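/*
 * Worked example for the Q0.24 split above: rate = 148500000 Hz gives
 * pl5_intin = 148 and pl5_fracin = (500000 << 24) / 1000000 = 8388608
 * (0x800000), i.e. a fractional part of exactly 0.5 in 24-bit fixed point.
 */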
584 unsigned long rate;
594 unsigned long rate = dsi_div->rate;
596 if (!rate)
597 rate = parent_rate;
599 return rate;
603 unsigned long rate)
606 struct rzg2l_cpg_priv *priv = dsi_div->priv;
610 parent_rate = rzg2l_cpg_get_foutpostdiv_rate(&params, rate);
612 if (priv->mux_dsi_div_params.clksrc)
621 if (req->rate > MAX_VCLK_FREQ)
622 req->rate = MAX_VCLK_FREQ;
624 req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);
630 unsigned long rate,
634 struct rzg2l_cpg_priv *priv = dsi_div->priv;
637 * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
640 * calculates the pll parameters for generating FOUTPOSTDIV and the clk
644 if (!rate || rate > MAX_VCLK_FREQ)
645 return -EINVAL;
647 dsi_div->rate = rate;
649 (priv->mux_dsi_div_params.dsi_div_a << 0) |
650 (priv->mux_dsi_div_params.dsi_div_b << 8),
651 priv->base + CPG_PL5_SDIV);
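/*
 * Taken together with rzg2l_cpg_get_vclk_rate() below, the two SDIV fields
 * give VCLK = FOUTPOSTDIV / (2^dsi_div_a * (dsi_div_b + 1)); with the
 * defaults programmed at registration (dsi_div_a = 1, dsi_div_b = 2) that
 * is a divide-by-6.
 */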
662 static struct clk * __init
667 const struct clk *parent;
673 parent = priv->clks[core->parent];
677 clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
679 return ERR_PTR(-ENOMEM);
681 clk_hw_data->priv = priv;
684 init.name = core->name;
690 clk_hw = &clk_hw_data->hw;
691 clk_hw->init = &init;
693 ret = devm_clk_hw_register(priv->dev, clk_hw);
697 return clk_hw->clk;
703 unsigned long rate;
714 struct rzg2l_cpg_priv *priv = hwdata->priv;
716 parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
717 req->best_parent_hw = parent;
718 req->best_parent_rate = req->rate;
726 struct rzg2l_cpg_priv *priv = hwdata->priv;
729  * FOUTPOSTDIV--->|
730  *                | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
731  * |--FOUT1PH0--->|
734 * rate and clk source for the MUX. It propagates that info to
739 priv->base + CPG_OTHERFUNC1_REG);
747 struct rzg2l_cpg_priv *priv = hwdata->priv;
749 return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
758 static struct clk * __init
767 clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
769 return ERR_PTR(-ENOMEM);
771 clk_hw_data->priv = priv;
772 clk_hw_data->conf = core->conf;
774 init.name = core->name;
777 init.num_parents = core->num_parents;
778 init.parent_names = core->parent_names;
780 clk_hw = &clk_hw_data->hw;
781 clk_hw->init = &init;
783 ret = devm_clk_hw_register(priv->dev, clk_hw);
787 return clk_hw->clk;
800 unsigned long rate)
803 struct rzg2l_cpg_priv *priv = sipll5->priv;
806 vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
807 (priv->mux_dsi_div_params.dsi_div_b + 1));
809 if (priv->mux_dsi_div_params.clksrc)
819 unsigned long pll5_rate = sipll5->foutpostdiv_rate;
828 unsigned long rate,
831 return rate;
835 unsigned long rate,
839 struct rzg2l_cpg_priv *priv = sipll5->priv;
846  * OSC --> PLL5 --> FOUTPOSTDIV-->|
847  *                  |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
848  *                  |--FOUT1PH0-->|
851 * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
854 * OSC --> PLL5 --> FOUTPOSTDIV
857 if (!rate)
858 return -EINVAL;
860 vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
861 sipll5->foutpostdiv_rate =
865 writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
866 ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
869 dev_err(priv->dev, "failed to release pll5 lock");
875 (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);
878 writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);
882 priv->base + CPG_SIPLL5_CLK4);
885 writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);
890 priv->base + CPG_SIPLL5_STBY);
893 ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
896 dev_err(priv->dev, "failed to lock pll5\n");
909 static struct clk * __init
913 const struct clk *parent;
920 parent = priv->clks[core->parent];
924 sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
926 return ERR_PTR(-ENOMEM);
928 init.name = core->name;
935 sipll5->hw.init = &init;
936 sipll5->conf = core->conf;
937 sipll5->priv = priv;
940 CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);
942 clk_hw = &sipll5->hw;
943 clk_hw->init = &init;
945 ret = devm_clk_hw_register(priv->dev, clk_hw);
949 priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
950 priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
951 priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */
953 return clk_hw->clk;
971 struct rzg2l_cpg_priv *priv = pll_clk->priv;
973 u64 rate;
975 if (pll_clk->type != CLK_TYPE_SAM_PLL)
978 val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
979 val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));
981 rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
984 return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
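/*
 * I.e., assuming the elided shift argument is 16 + SDIV(val2) as in the
 * mainline driver: Fout = Fref * (MDIV + KDIV / 2^16) / (PDIV * 2^SDIV).
 */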
995 struct rzg2l_cpg_priv *priv = pll_clk->priv;
997 u64 rate;
999 if (pll_clk->type != CLK_TYPE_G3S_PLL)
1002 setting = GET_REG_SAMPLL_SETTING(pll_clk->conf);
1004 val = readl(priv->base + setting);
1006 return pll_clk->default_rate;
1009 val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
1020 rate = mul_u64_u32_shr(parent_rate, 4096 * nir + nfr, 12);
1022 return DIV_ROUND_CLOSEST_ULL(rate, (mr * pr));
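/*
 * From the two visible steps: Fout = Fref * (nir + nfr / 4096) / (mr * pr),
 * i.e. an integer multiplier with a 12-bit fractional part, divided by the
 * M and P post-dividers.
 */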
1029 static struct clk * __init
1034 struct device *dev = priv->dev;
1035 const struct clk *parent;
1041 parent = priv->clks[core->parent];
1047 return ERR_PTR(-ENOMEM);
1050 init.name = core->name;
1056 pll_clk->hw.init = &init;
1057 pll_clk->conf = core->conf;
1058 pll_clk->base = priv->base;
1059 pll_clk->priv = priv;
1060 pll_clk->type = core->type;
1061 pll_clk->default_rate = core->default_rate;
1063 ret = devm_clk_hw_register(dev, &pll_clk->hw);
1067 return pll_clk->hw.clk;
1070 static struct clk
1074 unsigned int clkidx = clkspec->args[1];
1076 struct device *dev = priv->dev;
1078 struct clk *clk;
1080 switch (clkspec->args[0]) {
1083 if (clkidx > priv->last_dt_core_clk) {
1085 return ERR_PTR(-EINVAL);
1087 clk = priv->clks[clkidx];
1092 if (clkidx >= priv->num_mod_clks) {
1095 return ERR_PTR(-EINVAL);
1097 clk = priv->clks[priv->num_core_clks + clkidx];
1101 dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
1102 return ERR_PTR(-EINVAL);
1105 if (IS_ERR(clk))
1107 PTR_ERR(clk));
1109 dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
1110 clkspec->args[0], clkspec->args[1], clk,
1111 clk_get_rate(clk));
1112 return clk;
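/*
 * Consumer sketch (illustrative identifiers): with the two-cell binding
 * from dt-bindings/clock/renesas-cpg-mssr.h included above, a device node
 * references a clock as
 *
 *	clocks = <&cpg CPG_MOD R9A07G044_SCIF0_CLK_PCK>;
 *
 * where args[0] selects the CPG_CORE/CPG_MOD namespace and args[1] the
 * index validated above.
 */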
1120 struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
1121 struct device *dev = priv->dev;
1122 unsigned int id = core->id, div = core->div;
1126 WARN_DEBUG(id >= priv->num_core_clks);
1127 WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
1129 switch (core->type) {
1131 clk = of_clk_get_by_name(priv->dev->of_node, core->name);
1134 WARN_DEBUG(core->parent >= priv->num_core_clks);
1135 parent = priv->clks[core->parent];
1137 clk = parent;
1142 clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name, parent_name,
1144 core->mult, div);
1146 clk = ERR_CAST(clk_hw);
1148 clk = clk_hw->clk;
1151 clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg2l_cpg_pll_ops);
1154 clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg3s_cpg_pll_ops);
1157 clk = rzg2l_cpg_sipll5_register(core, priv);
1160 clk = rzg2l_cpg_div_clk_register(core, priv);
1163 clk = rzg3s_cpg_div_clk_register(core, priv);
1166 clk = rzg2l_cpg_mux_clk_register(core, priv);
1169 clk = rzg2l_cpg_sd_mux_clk_register(core, priv);
1172 clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
1175 clk = rzg2l_cpg_dsi_div_clk_register(core, priv);
1181 if (IS_ERR_OR_NULL(clk))
1184 dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
1185 priv->clks[id] = clk;
1190 core->name, PTR_ERR(clk));
1194 * struct mstop - MSTOP specific data structure
1205 * struct mod_clock - Module clock
1207 * @hw: handle between common and hardware-specific interfaces
1232 for (unsigned int i = 0; (priv) && i < (priv)->num_mod_clks; i++) \
1233 if ((priv)->clks[(priv)->num_core_clks + i] == ERR_PTR(-ENOENT)) \
1235 else if (((hw) = __clk_get_hw((priv)->clks[(priv)->num_core_clks + i])) && \
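/*
 * Reading the visible pieces: the iterator walks the module-clock slice of
 * priv->clks, skips slots still holding ERR_PTR(-ENOENT), and (in the
 * elided tail) binds the loop variable to the struct mod_clock containing
 * each registered clk_hw.
 */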
1238 /* Must be called with a lock held to avoid concurrent access to mstop->usecnt. */
1242 struct rzg2l_cpg_priv *priv = clock->priv;
1243 struct mstop *mstop = clock->mstop;
1250 value = MSTOP_MASK(mstop->conf) << 16;
1255 for (unsigned int i = 0; i < clock->num_shared_mstop_clks; i++) {
1256 struct mod_clock *clk = clock->shared_mstop_clks[i];
1258 if (clk_hw_get_flags(&clk->hw) & CLK_IS_CRITICAL)
1262 if (!clock->num_shared_mstop_clks &&
1263 clk_hw_get_flags(&clock->hw) & CLK_IS_CRITICAL)
1275 if (criticals && criticals == atomic_read(&mstop->usecnt))
1278 value |= MSTOP_MASK(mstop->conf);
1281 if (!atomic_read(&mstop->usecnt))
1284 update = atomic_dec_and_test(&mstop->usecnt);
1286 if (!atomic_read(&mstop->usecnt))
1288 atomic_inc(&mstop->usecnt);
1292 writel(value, priv->base + MSTOP_OFF(mstop->conf));
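/*
 * The MSTOP registers use the same write-enable layout as the CPG_WEN_BIT
 * writes above: bits [31:16] select which of bits [15:0] get latched.
 * Illustrative values: with MSTOP_MASK(conf) == 0x0006, writing 0x00060006
 * asserts both stop bits in one go, while 0x00060000 releases them.
 */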
1297 struct rzg2l_cpg_priv *priv = s->private;
1298 struct mod_clock *clk;
1301 seq_printf(s, "%-20s %-5s %-10s\n", "", "", "MSTOP");
1302 seq_printf(s, "%-20s %-5s %-10s\n", "", "clk", "-------------------------");
1303 seq_printf(s, "%-20s %-5s %-5s %-5s %-6s %-6s\n",
1305 seq_printf(s, "%-20s %-5s %-5s %-5s %-6s %-6s\n",
1306 "--------", "-----", "-----", "-----", "------", "------");
1308 for_each_mod_clock(clk, hw, priv) {
1311 if (!clk->mstop)
1314 val = readl(priv->base + MSTOP_OFF(clk->mstop->conf)) &
1315 MSTOP_MASK(clk->mstop->conf);
1317 seq_printf(s, "%-20s %-5d %-5d 0x%-3lx 0x%-4x", clk_hw_get_name(hw),
1318 __clk_get_enable_count(hw->clk), atomic_read(&clk->mstop->usecnt),
1319 MSTOP_OFF(clk->mstop->conf), val);
1321 for (unsigned int i = 0; i < clk->num_shared_mstop_clks; i++)
1322 seq_printf(s, " %pC", clk->shared_mstop_clks[i]->hw.clk);
1334 struct rzg2l_cpg_priv *priv = clock->priv;
1335 unsigned int reg = clock->off;
1336 struct device *dev = priv->dev;
1337 u32 bitmask = BIT(clock->bit);
1341 if (!clock->off) {
1342 dev_dbg(dev, "%pC does not support ON/OFF\n", hw->clk);
1346 dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(reg), hw->clk,
1353 scoped_guard(spinlock_irqsave, &priv->rmw_lock) {
1355 writel(value, priv->base + CLK_ON_R(reg));
1359 writel(value, priv->base + CLK_ON_R(reg));
1366 if (!priv->info->has_clk_mon_regs)
1369 error = readl_poll_timeout_atomic(priv->base + CLK_MON_R(reg), value,
1373 CLK_ON_R(reg), hw->clk);
1382 if (clock->sibling) {
1383 struct rzg2l_cpg_priv *priv = clock->priv;
1387 spin_lock_irqsave(&priv->rmw_lock, flags);
1388 enabled = clock->sibling->enabled;
1389 clock->enabled = true;
1390 spin_unlock_irqrestore(&priv->rmw_lock, flags);
1402 if (clock->sibling) {
1403 struct rzg2l_cpg_priv *priv = clock->priv;
1407 spin_lock_irqsave(&priv->rmw_lock, flags);
1408 enabled = clock->sibling->enabled;
1409 clock->enabled = false;
1410 spin_unlock_irqrestore(&priv->rmw_lock, flags);
1421 struct rzg2l_cpg_priv *priv = clock->priv;
1422 u32 bitmask = BIT(clock->bit);
1425 if (!clock->off) {
1426 dev_dbg(priv->dev, "%pC does not support ON/OFF\n", hw->clk);
1430 if (clock->sibling)
1431 return clock->enabled;
1433 if (priv->info->has_clk_mon_regs)
1434 value = readl(priv->base + CLK_MON_R(clock->off));
1436 value = readl(priv->base + clock->off);
1451 struct mod_clock *clk;
1454 for_each_mod_clock(clk, hw, priv) {
1455 if (clock->off == clk->off && clock->bit == clk->bit)
1456 return clk;
1464 struct mod_clock *clk;
1467 for_each_mod_clock(clk, hw, priv) {
1468 if (!clk->mstop)
1471 if (clk->mstop->conf == conf)
1472 return clk->mstop;
1480 struct mod_clock *clk;
1483 for_each_mod_clock(clk, hw, priv) {
1484 if (!clk->mstop)
1492 scoped_guard(spinlock_irqsave, &priv->rmw_lock) {
1493 if (!rzg2l_mod_clock_is_enabled(&clk->hw))
1494 rzg2l_mod_clock_module_set_state(clk, true);
1502 struct mod_clock *clk;
1505 if (!clock->mstop)
1508 for_each_mod_clock(clk, hw, priv) {
1512 if (clk->mstop != clock->mstop)
1515 num_shared_mstop_clks = clk->num_shared_mstop_clks;
1519 new_clks = devm_krealloc(priv->dev, clk->shared_mstop_clks,
1523 return -ENOMEM;
1526 new_clks[num_shared_mstop_clks++] = clk;
1530 new_clks[i]->shared_mstop_clks = new_clks;
1531 new_clks[i]->num_shared_mstop_clks = num_shared_mstop_clks;
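/*
 * Net effect, as far as the fragments show: every module clock sharing one
 * MSTOP ends up pointing at the same (re)allocated array, so the
 * CLK_IS_CRITICAL accounting in rzg2l_mod_clock_module_set_state() sees
 * all of its siblings.
 */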
1545 struct device *dev = priv->dev;
1546 unsigned int id = mod->id;
1548 struct clk *parent, *clk;
1553 WARN_DEBUG(id < priv->num_core_clks);
1554 WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
1555 WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
1556 WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
1558 parent = priv->clks[mod->parent];
1560 clk = parent;
1566 clk = ERR_PTR(-ENOMEM);
1570 init.name = mod->name;
1573 for (i = 0; i < info->num_crit_mod_clks; i++)
1574 if (id == info->crit_mod_clks[i]) {
1576 mod->name);
1585 clock->off = mod->off;
1586 clock->bit = mod->bit;
1587 clock->priv = priv;
1588 clock->hw.init = &init;
1590 if (mod->mstop_conf) {
1591 struct mstop *mstop = rzg2l_mod_clock_get_mstop(priv, mod->mstop_conf);
1596 clk = ERR_PTR(-ENOMEM);
1599 mstop->conf = mod->mstop_conf;
1600 atomic_set(&mstop->usecnt, 0);
1602 clock->mstop = mstop;
1605 ret = devm_clk_hw_register(dev, &clock->hw);
1607 clk = ERR_PTR(ret);
1611 if (mod->is_coupled) {
1614 clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
1617 clock->sibling = sibling;
1618 sibling->sibling = clock;
1622 /* Keep this before priv->clks[id] is updated. */
1625 clk = ERR_PTR(ret);
1629 clk = clock->hw.clk;
1630 dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
1631 priv->clks[id] = clk;
1637 mod->name, PTR_ERR(clk));
1646 const struct rzg2l_cpg_info *info = priv->info;
1647 unsigned int reg = info->resets[id].off;
1648 u32 mask = BIT(info->resets[id].bit);
1649 s8 monbit = info->resets[id].monbit;
1652 dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
1654 writel(value, priv->base + CLK_RST_R(reg));
1656 if (info->has_clk_mon_regs) {
1667 return readl_poll_timeout_atomic(priv->base + reg, value,
1675 const struct rzg2l_cpg_info *info = priv->info;
1676 unsigned int reg = info->resets[id].off;
1677 u32 mask = BIT(info->resets[id].bit);
1678 s8 monbit = info->resets[id].monbit;
1681 dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
1684 writel(value, priv->base + CLK_RST_R(reg));
1686 if (info->has_clk_mon_regs) {
1697 return readl_poll_timeout_atomic(priv->base + reg, value,
1717 const struct rzg2l_cpg_info *info = priv->info;
1718 s8 monbit = info->resets[id].monbit;
1722 if (info->has_clk_mon_regs) {
1723 reg = CLK_MRST_R(info->resets[id].off);
1724 bitmask = BIT(info->resets[id].bit);
1729 return -ENOTSUPP;
1732 return !!(readl(priv->base + reg) & bitmask);
1746 const struct rzg2l_cpg_info *info = priv->info;
1747 unsigned int id = reset_spec->args[0];
1749 if (id >= rcdev->nr_resets || !info->resets[id].off) {
1750 dev_err(rcdev->dev, "Invalid reset index %u\n", id);
1751 return -EINVAL;
1759 priv->rcdev.ops = &rzg2l_cpg_reset_ops;
1760 priv->rcdev.of_node = priv->dev->of_node;
1761 priv->rcdev.dev = priv->dev;
1762 priv->rcdev.of_reset_n_cells = 1;
1763 priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
1764 priv->rcdev.nr_resets = priv->num_resets;
1766 return devm_reset_controller_register(priv->dev, &priv->rcdev);
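/*
 * Consumer-side sketch (hypothetical driver code): with of_reset_n_cells
 * set to 1 above, a device referencing this controller toggles its reset
 * line through the generic API:
 *
 *	struct reset_control *rstc;
 *
 *	rstc = devm_reset_control_get_exclusive(dev, NULL);
 *	if (IS_ERR(rstc))
 *		return PTR_ERR(rstc);
 *	ret = reset_control_deassert(rstc);
 */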
1772 if (clkspec->np != priv->genpd.dev.of_node || clkspec->args_count != 2)
1775 switch (clkspec->args[0]) {
1777 const struct rzg2l_cpg_info *info = priv->info;
1778 unsigned int id = clkspec->args[1];
1780 if (id >= priv->num_mod_clks)
1783 id += info->num_total_core_clks;
1785 for (unsigned int i = 0; i < info->num_no_pm_mod_clks; i++) {
1786 if (info->no_pm_mod_clks[i] == id)
1802 struct device_node *np = dev->of_node;
1805 struct clk *clk;
1809 for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
1823 clk = of_clk_get_from_provider(&clkspec);
1825 if (IS_ERR(clk)) {
1826 error = PTR_ERR(clk);
1830 error = pm_clk_add_clk(dev, clk);
1840 clk_put(clk);
1861 struct device *dev = priv->dev;
1862 struct device_node *np = dev->of_node;
1863 struct generic_pm_domain *genpd = &priv->genpd;
1866 genpd->name = np->name;
1867 genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
1869 genpd->attach_dev = rzg2l_cpg_attach_dev;
1870 genpd->detach_dev = rzg2l_cpg_detach_dev;
1884 struct device *dev = &pdev->dev;
1885 struct device_node *np = dev->of_node;
1889 struct clk **clks;
1896 return -ENOMEM;
1898 priv->dev = dev;
1899 priv->info = info;
1900 spin_lock_init(&priv->rmw_lock);
1902 priv->base = devm_platform_ioremap_resource(pdev, 0);
1903 if (IS_ERR(priv->base))
1904 return PTR_ERR(priv->base);
1906 nclks = info->num_total_core_clks + info->num_hw_mod_clks;
1909 return -ENOMEM;
1912 priv->clks = clks;
1913 priv->num_core_clks = info->num_total_core_clks;
1914 priv->num_mod_clks = info->num_hw_mod_clks;
1915 priv->num_resets = info->num_resets;
1916 priv->last_dt_core_clk = info->last_dt_core_clk;
1919 clks[i] = ERR_PTR(-ENOENT);
1921 for (i = 0; i < info->num_core_clks; i++)
1922 rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);
1924 for (i = 0; i < info->num_mod_clks; i++)
1925 rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);
1930 * non-critical) share the same MSTOP.
1970 .compatible = "renesas,r9a07g043-cpg",
1976 .compatible = "renesas,r9a07g044-cpg",
1982 .compatible = "renesas,r9a07g054-cpg",
1988 .compatible = "renesas,r9a08g045-cpg",
1994 .compatible = "renesas,r9a09g011-cpg",
2003 .name = "rzg2l-cpg",