// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RZ/V2H(P) Clock Pulse Generator
 *
 * Copyright (C) 2024 Renesas Electronics Corp.
 *
 * Based on rzg2l-cpg.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/refcount.h>
#include <linux/reset-controller.h>
#include <linux/string_choices.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "rzv2h-cpg.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

#define GET_CLK_ON_OFFSET(x)	(0x600 + ((x) * 4))
#define GET_CLK_MON_OFFSET(x)	(0x800 + ((x) * 4))
#define GET_RST_OFFSET(x)	(0x900 + ((x) * 4))
#define GET_RST_MON_OFFSET(x)	(0xA00 + ((x) * 4))

#define CPG_BUS_1_MSTOP		(0xd00)
#define CPG_BUS_MSTOP(m)	(CPG_BUS_1_MSTOP + ((m) - 1) * 4)

#define CPG_PLL_STBY(x)		((x))
#define CPG_PLL_STBY_RESETB	BIT(0)
#define CPG_PLL_STBY_RESETB_WEN	BIT(16)
#define CPG_PLL_CLK1(x)		((x) + 0x004)
#define CPG_PLL_CLK1_KDIV(x)	((s16)FIELD_GET(GENMASK(31, 16), (x)))
#define CPG_PLL_CLK1_MDIV(x)	FIELD_GET(GENMASK(15, 6), (x))
#define CPG_PLL_CLK1_PDIV(x)	FIELD_GET(GENMASK(5, 0), (x))
#define CPG_PLL_CLK2(x)		((x) + 0x008)
#define CPG_PLL_CLK2_SDIV(x)	FIELD_GET(GENMASK(2, 0), (x))
#define CPG_PLL_MON(x)		((x) + 0x010)
#define CPG_PLL_MON_RESETB	BIT(0)
#define CPG_PLL_MON_LOCK	BIT(4)

#define DDIV_DIVCTL_WEN(shift)	BIT((shift) + 16)

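/*
 * Each CLK_ON register controls 16 module clocks, so a module clock ID is
 * its CLK_ON register index times 16 plus its bit position, offset past
 * the core clock IDs (base).
 */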
#define GET_MOD_CLK_ID(base, index, bit) \
		((base) + ((index) * 16) + (bit))

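/* CPG_CLKSTATUS0 bits stay set while a DDIV divider update is in progress */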
#define CPG_CLKSTATUS0		(0x700)

/**
 * struct rzv2h_cpg_priv - Clock Pulse Generator Private Data
 *
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @resets: Array of resets
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @ff_mod_status_ops: Fixed Factor Module Status Clock operations
 * @mstop_count: Array of MSTOP usage counts, one counter per MSTOP bit
 * @rcdev: Reset controller entity
 */
struct rzv2h_cpg_priv {
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	struct rzv2h_reset *resets;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	struct clk_ops *ff_mod_status_ops;

	atomic_t *mstop_count;

	struct reset_controller_dev rcdev;
};

#define rcdev_to_priv(x)	container_of(x, struct rzv2h_cpg_priv, rcdev)

struct pll_clk {
	struct rzv2h_cpg_priv *priv;
	struct clk_hw hw;
	struct pll pll;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)

/**
 * struct mod_clock - Module clock
 *
 * @priv: CPG private data
 * @mstop_data: mstop data relating to module clock
 * @hw: handle between common and hardware-specific interfaces
 * @no_pm: flag to indicate PM is not supported
 * @on_index: CLK_ON register index
 * @on_bit: bit position in the CLK_ON register
 * @mon_index: CLK_MON register index, or -1 if there is no monitor bit
 * @mon_bit: bit position in the CLK_MON register
 * @ext_clk_mux_index: mux index for external clock source, or -1 if internal
 */
struct mod_clock {
	struct rzv2h_cpg_priv *priv;
	unsigned int mstop_data;
	struct clk_hw hw;
	bool no_pm;
	u8 on_index;
	u8 on_bit;
	s8 mon_index;
	u8 mon_bit;
	s8 ext_clk_mux_index;
};

#define to_mod_clock(_hw)	container_of(_hw, struct mod_clock, hw)

/**
 * struct ddiv_clk - DDIV clock
 *
 * @priv: CPG private data
 * @div: divider clk
 * @mon: monitor bit in CPG_CLKSTATUS0 register
 */
struct ddiv_clk {
	struct rzv2h_cpg_priv *priv;
	struct clk_divider div;
	u8 mon;
};

#define to_ddiv_clock(_div)	container_of(_div, struct ddiv_clk, div)

/**
 * struct rzv2h_ff_mod_status_clk - Fixed Factor Module Status Clock
 *
 * @priv: CPG private data
 * @conf: fixed mod configuration
 * @fix: fixed factor clock
 */
struct rzv2h_ff_mod_status_clk {
	struct rzv2h_cpg_priv *priv;
	struct fixed_mod_conf conf;
	struct clk_fixed_factor fix;
};

#define to_rzv2h_ff_mod_status_clk(_hw) \
	container_of(_hw, struct rzv2h_ff_mod_status_clk, fix.hw)

static int rzv2h_cpg_pll_clk_is_enabled(struct clk_hw *hw)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzv2h_cpg_priv *priv = pll_clk->priv;
	u32 val = readl(priv->base + CPG_PLL_MON(pll_clk->pll.offset));

	/* Ensure both RESETB and LOCK bits are set */
	return (val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) ==
	       (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK);
}

static int rzv2h_cpg_pll_clk_enable(struct clk_hw *hw)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzv2h_cpg_priv *priv = pll_clk->priv;
	struct pll pll = pll_clk->pll;
	u32 stby_offset;
	u32 mon_offset;
	u32 val;
	int ret;

	if (rzv2h_cpg_pll_clk_is_enabled(hw))
		return 0;

	stby_offset = CPG_PLL_STBY(pll.offset);
	mon_offset = CPG_PLL_MON(pll.offset);

	writel(CPG_PLL_STBY_RESETB_WEN | CPG_PLL_STBY_RESETB,
	       priv->base + stby_offset);

	/*
	 * Ensure the PLL enters normal mode
	 *
	 * Note: There is no HW information about the worst case latency.
	 *
	 * Since this latency might depend on external crystal or PLL rate,
	 * use a "super" safe timeout value.
	 */
	ret = readl_poll_timeout_atomic(priv->base + mon_offset, val,
			(val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) ==
			(CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK), 200, 2000);
	if (ret)
		dev_err(priv->dev, "Failed to enable PLL 0x%x/%pC\n",
			stby_offset, hw->clk);

	return ret;
}

static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzv2h_cpg_priv *priv = pll_clk->priv;
	struct pll pll = pll_clk->pll;
	unsigned int clk1, clk2;
	u64 rate;

	if (!pll.has_clkn)
		return 0;

	clk1 = readl(priv->base + CPG_PLL_CLK1(pll.offset));
	clk2 = readl(priv->base + CPG_PLL_CLK2(pll.offset));

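	/*
	 * Fout = Fin * (MDIV + KDIV / 65536) / (PDIV * 2^SDIV), where KDIV
	 * is a signed 16-bit fractional correction to the integer multiplier.
	 */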
	rate = mul_u64_u32_shr(parent_rate, (CPG_PLL_CLK1_MDIV(clk1) << 16) +
			       CPG_PLL_CLK1_KDIV(clk1), 16 + CPG_PLL_CLK2_SDIV(clk2));

	return DIV_ROUND_CLOSEST_ULL(rate, CPG_PLL_CLK1_PDIV(clk1));
}

static const struct clk_ops rzv2h_cpg_pll_ops = {
	.is_enabled = rzv2h_cpg_pll_clk_is_enabled,
	.enable = rzv2h_cpg_pll_clk_enable,
	.recalc_rate = rzv2h_cpg_pll_clk_recalc_rate,
};

static struct clk * __init
rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct rzv2h_cpg_priv *priv,
			   const struct clk_ops *ops)
{
	struct device *dev = priv->dev;
	struct clk_init_data init;
	const struct clk *parent;
	const char *parent_name;
	struct pll_clk *pll_clk;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->pll = core->cfg.pll;
	pll_clk->priv = priv;

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret)
		return ERR_PTR(ret);

	return pll_clk->hw.clk;
}

static unsigned long rzv2h_ddiv_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int val;

	val = readl(divider->reg) >> divider->shift;
	val &= clk_div_mask(divider->width);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

static int rzv2h_ddiv_determine_rate(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	struct clk_divider *divider = to_clk_divider(hw);

	return divider_determine_rate(hw, req, divider->table, divider->width,
				      divider->flags);
}

static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon)
{
	u32 bitmask = BIT(mon);
	u32 val;

	if (mon == CSDIV_NO_MON)
		return 0;

	return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val, !(val & bitmask), 10, 200);
}

static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	struct ddiv_clk *ddiv = to_ddiv_clock(divider);
	struct rzv2h_cpg_priv *priv = ddiv->priv;
	unsigned long flags = 0;
	int value;
	u32 val;
	int ret;

	value = divider_get_val(rate, parent_rate, divider->table,
				divider->width, divider->flags);
	if (value < 0)
		return value;

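	/*
	 * The divider must be idle before DIVCTL is rewritten: wait for the
	 * CLKSTATUS0 monitor bit to clear, write the new value together with
	 * its write-enable bit, then wait for the update to complete.
	 */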
	spin_lock_irqsave(divider->lock, flags);

	ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
	if (ret)
		goto ddiv_timeout;

	val = readl(divider->reg) | DDIV_DIVCTL_WEN(divider->shift);
	val &= ~(clk_div_mask(divider->width) << divider->shift);
	val |= (u32)value << divider->shift;
	writel(val, divider->reg);

	ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);

ddiv_timeout:
	spin_unlock_irqrestore(divider->lock, flags);
	return ret;
}

static const struct clk_ops rzv2h_ddiv_clk_divider_ops = {
	.recalc_rate = rzv2h_ddiv_recalc_rate,
	.determine_rate = rzv2h_ddiv_determine_rate,
	.set_rate = rzv2h_ddiv_set_rate,
};

static struct clk * __init
rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
			    struct rzv2h_cpg_priv *priv)
{
	struct ddiv cfg_ddiv = core->cfg.ddiv;
	struct clk_init_data init = {};
	struct device *dev = priv->dev;
	u8 shift = cfg_ddiv.shift;
	u8 width = cfg_ddiv.width;
	const struct clk *parent;
	const char *parent_name;
	struct clk_divider *div;
	struct ddiv_clk *ddiv;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	if ((shift + width) > 16)
		return ERR_PTR(-EINVAL);

	ddiv = devm_kzalloc(priv->dev, sizeof(*ddiv), GFP_KERNEL);
	if (!ddiv)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	if (cfg_ddiv.no_rmw)
		init.ops = &clk_divider_ops;
	else
		init.ops = &rzv2h_ddiv_clk_divider_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;
	init.flags = CLK_SET_RATE_PARENT;

	ddiv->priv = priv;
	ddiv->mon = cfg_ddiv.monbit;
	div = &ddiv->div;
	div->reg = priv->base + cfg_ddiv.offset;
	div->shift = shift;
	div->width = width;
	div->flags = core->flag;
	div->lock = &priv->rmw_lock;
	div->hw.init = &init;
	div->table = core->dtable;

	ret = devm_clk_hw_register(dev, &div->hw);
	if (ret)
		return ERR_PTR(ret);

	return div->hw.clk;
}

static struct clk * __init
rzv2h_cpg_mux_clk_register(const struct cpg_core_clk *core,
			   struct rzv2h_cpg_priv *priv)
{
	struct smuxed mux = core->cfg.smux;
	const struct clk_hw *clk_hw;

	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
					  core->parent_names, core->num_parents,
					  core->flag, priv->base + mux.offset,
					  mux.shift, mux.width,
					  core->mux_flags, &priv->rmw_lock);
	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

static int
rzv2h_clk_ff_mod_status_is_enabled(struct clk_hw *hw)
{
	struct rzv2h_ff_mod_status_clk *fix = to_rzv2h_ff_mod_status_clk(hw);
	struct rzv2h_cpg_priv *priv = fix->priv;
	u32 offset = GET_CLK_MON_OFFSET(fix->conf.mon_index);
	u32 bitmask = BIT(fix->conf.mon_bit);
	u32 val;

	val = readl(priv->base + offset);
	return !!(val & bitmask);
}

static struct clk * __init
rzv2h_cpg_fixed_mod_status_clk_register(const struct cpg_core_clk *core,
					struct rzv2h_cpg_priv *priv)
{
	struct rzv2h_ff_mod_status_clk *clk_hw_data;
	struct clk_init_data init = { };
	struct clk_fixed_factor *fix;
	const struct clk *parent;
	const char *parent_name;
	int ret;

	WARN_DEBUG(core->parent >= priv->num_core_clks);
	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;
	clk_hw_data->conf = core->cfg.fixed_mod;

	init.name = core->name;
	init.ops = priv->ff_mod_status_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	fix = &clk_hw_data->fix;
	fix->hw.init = &init;
	fix->mult = core->mult;
	fix->div = core->div;

	ret = devm_clk_hw_register(priv->dev, &clk_hw_data->fix.hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw_data->fix.hw.clk;
}

static struct clk
*rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
			       void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct rzv2h_cpg_priv *priv = data;
	struct device *dev = priv->dev;
	const char *type;
	struct clk *clk;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + clkidx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld\n", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}

static void __init
rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
			    struct rzv2h_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	unsigned int id = core->id, div = core->div;
	struct device *dev = priv->dev;
	const char *parent_name;
	struct clk_hw *clk_hw;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name,
							   parent_name, CLK_SET_RATE_PARENT,
							   core->mult, div);
		if (IS_ERR(clk_hw))
			clk = ERR_CAST(clk_hw);
		else
			clk = clk_hw->clk;
		break;
	case CLK_TYPE_FF_MOD_STATUS:
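		/*
		 * Lazily build a clk_ops that behaves like a fixed-factor
		 * clock but reports its enable state from the CLK_MON
		 * registers, shared across all such clocks.
		 */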
		if (!priv->ff_mod_status_ops) {
			priv->ff_mod_status_ops =
				devm_kzalloc(dev, sizeof(*priv->ff_mod_status_ops), GFP_KERNEL);
			if (!priv->ff_mod_status_ops) {
				clk = ERR_PTR(-ENOMEM);
				goto fail;
			}
			memcpy(priv->ff_mod_status_ops, &clk_fixed_factor_ops,
			       sizeof(const struct clk_ops));
			priv->ff_mod_status_ops->is_enabled = rzv2h_clk_ff_mod_status_is_enabled;
		}
		clk = rzv2h_cpg_fixed_mod_status_clk_register(core, priv);
		break;
	case CLK_TYPE_PLL:
		clk = rzv2h_cpg_pll_clk_register(core, priv, &rzv2h_cpg_pll_ops);
		break;
	case CLK_TYPE_DDIV:
		clk = rzv2h_cpg_ddiv_clk_register(core, priv);
		break;
	case CLK_TYPE_SMUX:
		clk = rzv2h_cpg_mux_clk_register(core, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register core clock %s: %ld\n",
		core->name, PTR_ERR(clk));
}

static void rzv2h_mod_clock_mstop_enable(struct rzv2h_cpg_priv *priv,
					 u32 mstop_data)
{
	unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
	u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
	atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
	unsigned long flags;
	unsigned int i;
	u32 val = 0;

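	/*
	 * The upper 16 bits of an MSTOP register are a write-enable mask;
	 * writing the enable bit with the data bit clear releases the
	 * bus-stop for that module. Only touch bits going from 0 users to 1.
	 */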
	spin_lock_irqsave(&priv->rmw_lock, flags);
	for_each_set_bit(i, &mstop_mask, 16) {
		if (!atomic_read(&mstop[i]))
			val |= BIT(i) << 16;
		atomic_inc(&mstop[i]);
	}
	if (val)
		writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
	spin_unlock_irqrestore(&priv->rmw_lock, flags);
}

static void rzv2h_mod_clock_mstop_disable(struct rzv2h_cpg_priv *priv,
					  u32 mstop_data)
{
	unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
	u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
	atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
	unsigned long flags;
	unsigned int i;
	u32 val = 0;

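	/*
	 * Drop one reference per bit; reassert the bus-stop (write-enable
	 * plus data bit) once the last user of a bit is gone, or if its
	 * count is already zero.
	 */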
	spin_lock_irqsave(&priv->rmw_lock, flags);
	for_each_set_bit(i, &mstop_mask, 16) {
		if (!atomic_read(&mstop[i]) ||
		    atomic_dec_and_test(&mstop[i]))
			val |= BIT(i) << 16 | BIT(i);
	}
	if (val)
		writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
	spin_unlock_irqrestore(&priv->rmw_lock, flags);
}

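/*
 * Read the SMUX parent's register directly to find which input is
 * currently selected in hardware.
 */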
static int rzv2h_parent_clk_mux_to_index(struct clk_hw *hw)
{
	struct clk_hw *parent_hw;
	struct clk *parent_clk;
	struct clk_mux *mux;
	u32 val;

	/* This will always succeed, so no need to check for IS_ERR() */
	parent_clk = clk_get_parent(hw->clk);

	parent_hw = __clk_get_hw(parent_clk);
	mux = to_clk_mux(parent_hw);

	val = readl(mux->reg) >> mux->shift;
	val &= mux->mask;
	return clk_mux_val_to_index(parent_hw, mux->table, 0, val);
}

static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
{
	struct mod_clock *clock = to_mod_clock(hw);
	struct rzv2h_cpg_priv *priv = clock->priv;
	int mon_index = clock->mon_index;
	u32 bitmask;
	u32 offset;

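	/*
	 * When the module is fed by an external clock selected through the
	 * SMUX, the CLK_MON bit does not reflect its state, so skip it.
	 */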
	if (clock->ext_clk_mux_index >= 0 &&
	    rzv2h_parent_clk_mux_to_index(hw) == clock->ext_clk_mux_index)
		mon_index = -1;

	if (mon_index >= 0) {
		offset = GET_CLK_MON_OFFSET(mon_index);
		bitmask = BIT(clock->mon_bit);

		if (!(readl(priv->base + offset) & bitmask))
			return 0;
	}

	offset = GET_CLK_ON_OFFSET(clock->on_index);
	bitmask = BIT(clock->on_bit);

	return readl(priv->base + offset) & bitmask;
}

static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	bool enabled = rzv2h_mod_clock_is_enabled(hw);
	struct mod_clock *clock = to_mod_clock(hw);
	unsigned int reg = GET_CLK_ON_OFFSET(clock->on_index);
	struct rzv2h_cpg_priv *priv = clock->priv;
	u32 bitmask = BIT(clock->on_bit);
	struct device *dev = priv->dev;
	u32 value;
	int error;

	dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk,
		str_on_off(enable));

	if (enabled == enable)
		return 0;

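	/*
	 * CLK_ON registers take a write-enable mask in the upper 16 bits.
	 * Turn the clock on before releasing MSTOP, and reassert MSTOP
	 * before turning the clock off.
	 */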
	value = bitmask << 16;
	if (enable) {
		value |= bitmask;
		writel(value, priv->base + reg);
		if (clock->mstop_data != BUS_MSTOP_NONE)
			rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data);
	} else {
		if (clock->mstop_data != BUS_MSTOP_NONE)
			rzv2h_mod_clock_mstop_disable(priv, clock->mstop_data);
		writel(value, priv->base + reg);
	}

	if (!enable || clock->mon_index < 0)
		return 0;

	reg = GET_CLK_MON_OFFSET(clock->mon_index);
	bitmask = BIT(clock->mon_bit);
	error = readl_poll_timeout_atomic(priv->base + reg, value,
					  value & bitmask, 0, 10);
	if (error)
		dev_err(dev, "Failed to enable CLK_ON 0x%x/%pC\n",
			GET_CLK_ON_OFFSET(clock->on_index), hw->clk);

	return error;
}

static int rzv2h_mod_clock_enable(struct clk_hw *hw)
{
	return rzv2h_mod_clock_endisable(hw, true);
}

static void rzv2h_mod_clock_disable(struct clk_hw *hw)
{
	rzv2h_mod_clock_endisable(hw, false);
}

static const struct clk_ops rzv2h_mod_clock_ops = {
	.enable = rzv2h_mod_clock_enable,
	.disable = rzv2h_mod_clock_disable,
	.is_enabled = rzv2h_mod_clock_is_enabled,
};

static void __init
rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
			   struct rzv2h_cpg_priv *priv)
{
	struct mod_clock *clock = NULL;
	struct device *dev = priv->dev;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int id;
	int ret;

	id = GET_MOD_CLK_ID(priv->num_core_clks, mod->on_index, mod->on_bit);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzv2h_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	if (mod->critical)
		init.flags |= CLK_IS_CRITICAL;

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->on_index = mod->on_index;
	clock->on_bit = mod->on_bit;
	clock->mon_index = mod->mon_index;
	clock->mon_bit = mod->mon_bit;
	clock->no_pm = mod->no_pm;
	clock->ext_clk_mux_index = mod->ext_clk_mux_index;
	clock->priv = priv;
	clock->hw.init = &init;
	clock->mstop_data = mod->mstop_data;

	ret = devm_clk_hw_register(dev, &clock->hw);
	if (ret) {
		clk = ERR_PTR(ret);
		goto fail;
	}

	priv->clks[id] = clock->hw.clk;

	/*
	 * Ensure the module clocks and MSTOP bits are synchronized when they are
	 * turned ON by the bootloader. Enable MSTOP bits for module clocks that were
	 * turned ON in an earlier boot stage.
	 */
	if (clock->mstop_data != BUS_MSTOP_NONE &&
	    !mod->critical && rzv2h_mod_clock_is_enabled(&clock->hw)) {
		rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data);
	} else if (clock->mstop_data != BUS_MSTOP_NONE && mod->critical) {
		unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, clock->mstop_data);
		u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, clock->mstop_data);
		atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
		unsigned long flags;
		unsigned int i;
		u32 val = 0;

		/*
		 * Critical clocks are turned ON immediately upon registration, and the
		 * MSTOP counter is updated through the rzv2h_mod_clock_enable() path.
		 * However, if the critical clocks were already turned ON by the initial
		 * bootloader, synchronize the atomic counter here and clear the MSTOP bit.
		 */
		spin_lock_irqsave(&priv->rmw_lock, flags);
		for_each_set_bit(i, &mstop_mask, 16) {
			if (atomic_read(&mstop[i]))
				continue;
			val |= BIT(i) << 16;
			atomic_inc(&mstop[i]);
		}
		if (val)
			writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
	}

	return;

fail:
	dev_err(dev, "Failed to register module clock %s: %ld\n",
		mod->name, PTR_ERR(clk));
}

static int __rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
			      unsigned long id, bool assert)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
	u32 mask = BIT(priv->resets[id].reset_bit);
	u8 monbit = priv->resets[id].mon_bit;
	u32 value = mask << 16;
	int ret;

	dev_dbg(rcdev->dev, "%s id:%ld offset:0x%x\n",
		assert ? "assert" : "deassert", id, reg);

	if (!assert)
		value |= mask;
	writel(value, priv->base + reg);

	reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	mask = BIT(monbit);

	ret = readl_poll_timeout_atomic(priv->base + reg, value,
					assert == !!(value & mask), 10, 200);
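
	/* If the deassert did not take effect, put the module back into reset */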
	if (ret && !assert) {
		value = mask << 16;
		writel(value, priv->base + GET_RST_OFFSET(priv->resets[id].reset_index));
	}

	return ret;
}

static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	return __rzv2h_cpg_assert(rcdev, id, true);
}

static int rzv2h_cpg_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	return __rzv2h_cpg_assert(rcdev, id, false);
}

static int rzv2h_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	int ret;

	ret = rzv2h_cpg_assert(rcdev, id);
	if (ret)
		return ret;

	return rzv2h_cpg_deassert(rcdev, id);
}

static int rzv2h_cpg_status(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	u8 monbit = priv->resets[id].mon_bit;

	return !!(readl(priv->base + reg) & BIT(monbit));
}

static const struct reset_control_ops rzv2h_cpg_reset_ops = {
	.reset = rzv2h_cpg_reset,
	.assert = rzv2h_cpg_assert,
	.deassert = rzv2h_cpg_deassert,
	.status = rzv2h_cpg_status,
};

static int rzv2h_cpg_reset_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int id = reset_spec->args[0];
	u8 rst_index = id / 16;
	u8 rst_bit = id % 16;
	unsigned int i;

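	/* The DT cell encodes a flat bit position: 16 resets per RST register */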
	for (i = 0; i < rcdev->nr_resets; i++) {
		if (rst_index == priv->resets[i].reset_index &&
		    rst_bit == priv->resets[i].reset_bit)
			return i;
	}

	return -EINVAL;
}

static int rzv2h_cpg_reset_controller_register(struct rzv2h_cpg_priv *priv)
{
	priv->rcdev.ops = &rzv2h_cpg_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.dev = priv->dev;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = rzv2h_cpg_reset_xlate;
	priv->rcdev.nr_resets = priv->num_resets;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

/**
 * struct rzv2h_cpg_pd - RZ/V2H power domain data structure
 * @priv: pointer to CPG private data structure
 * @genpd: generic PM domain
 */
struct rzv2h_cpg_pd {
	struct rzv2h_cpg_priv *priv;
	struct generic_pm_domain genpd;
};

static bool rzv2h_cpg_is_pm_clk(struct rzv2h_cpg_pd *pd,
				const struct of_phandle_args *clkspec)
{
	if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
		return false;

	switch (clkspec->args[0]) {
	case CPG_MOD: {
		struct rzv2h_cpg_priv *priv = pd->priv;
		unsigned int id = clkspec->args[1];
		struct mod_clock *clock;

		if (id >= priv->num_mod_clks)
			return false;

		if (priv->clks[priv->num_core_clks + id] == ERR_PTR(-ENOENT))
			return false;

		clock = to_mod_clock(__clk_get_hw(priv->clks[priv->num_core_clks + id]));

		return !clock->no_pm;
	}

	case CPG_CORE:
	default:
		return false;
	}
}

static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	struct rzv2h_cpg_pd *pd = container_of(domain, struct rzv2h_cpg_pd, genpd);
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	unsigned int i;
	int error;

	for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
		if (!rzv2h_cpg_is_pm_clk(pd, &clkspec)) {
			of_node_put(clkspec.np);
			continue;
		}

		if (once) {
			once = false;
			error = pm_clk_create(dev);
			if (error) {
				of_node_put(clkspec.np);
				goto err;
			}
		}
		clk = of_clk_get_from_provider(&clkspec);
		of_node_put(clkspec.np);
		if (IS_ERR(clk)) {
			error = PTR_ERR(clk);
			goto fail_destroy;
		}

		error = pm_clk_add_clk(dev, clk);
		if (error) {
			dev_err(dev, "pm_clk_add_clk failed %d\n",
				error);
			goto fail_put;
		}
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}

static void rzv2h_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static void rzv2h_cpg_genpd_remove_simple(void *data)
{
	pm_genpd_remove(data);
}

static int __init rzv2h_cpg_add_pm_domains(struct rzv2h_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzv2h_cpg_pd *pd;
	int ret;

	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->genpd.name = np->name;
	pd->priv = priv;
	pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
	pd->genpd.attach_dev = rzv2h_cpg_attach_dev;
	pd->genpd.detach_dev = rzv2h_cpg_detach_dev;
	ret = pm_genpd_init(&pd->genpd, &pm_domain_always_on_gov, false);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, rzv2h_cpg_genpd_remove_simple, &pd->genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, &pd->genpd);
}

static void rzv2h_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

static int __init rzv2h_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzv2h_cpg_info *info;
	struct rzv2h_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->rmw_lock);

	priv->dev = dev;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	priv->mstop_count = devm_kcalloc(dev, info->num_mstop_bits,
					 sizeof(*priv->mstop_count), GFP_KERNEL);
	if (!priv->mstop_count)
		return -ENOMEM;

	/* Adjust for CPG_BUS_m_MSTOP starting from m = 1 */
	priv->mstop_count -= 16;

	priv->resets = devm_kmemdup_array(dev, info->resets, info->num_resets,
					  sizeof(*info->resets), GFP_KERNEL);
	if (!priv->resets)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->last_dt_core_clk = info->last_dt_core_clk;
	priv->num_resets = info->num_resets;

	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzv2h_cpg_register_core_clk(&info->core_clks[i], priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzv2h_cpg_register_mod_clk(&info->mod_clks[i], priv);

	error = of_clk_add_provider(np, rzv2h_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzv2h_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzv2h_cpg_add_pm_domains(priv);
	if (error)
		return error;

	error = rzv2h_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}

static const struct of_device_id rzv2h_cpg_match[] = {
#ifdef CONFIG_CLK_R9A09G047
	{
		.compatible = "renesas,r9a09g047-cpg",
		.data = &r9a09g047_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G056
	{
		.compatible = "renesas,r9a09g056-cpg",
		.data = &r9a09g056_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G057
	{
		.compatible = "renesas,r9a09g057-cpg",
		.data = &r9a09g057_cpg_info,
	},
#endif
	{ /* sentinel */ }
};

static struct platform_driver rzv2h_cpg_driver = {
	.driver		= {
		.name	= "rzv2h-cpg",
		.of_match_table = rzv2h_cpg_match,
	},
};

static int __init rzv2h_cpg_init(void)
{
	return platform_driver_probe(&rzv2h_cpg_driver, rzv2h_cpg_probe);
}

subsys_initcall(rzv2h_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/V2H CPG Driver");