// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RZ/V2H(P) Clock Pulse Generator
 *
 * Copyright (C) 2024 Renesas Electronics Corp.
 *
 * Based on rzg2l-cpg.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/refcount.h>
#include <linux/reset-controller.h>
#include <linux/string_choices.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "rzv2h-cpg.h"

#ifdef DEBUG
#define WARN_DEBUG(x)		WARN_ON(x)
#else
#define WARN_DEBUG(x)		do { } while (0)
#endif

#define GET_CLK_ON_OFFSET(x)	(0x600 + ((x) * 4))
#define GET_CLK_MON_OFFSET(x)	(0x800 + ((x) * 4))
#define GET_RST_OFFSET(x)	(0x900 + ((x) * 4))
#define GET_RST_MON_OFFSET(x)	(0xA00 + ((x) * 4))

#define CPG_BUS_1_MSTOP		(0xd00)
#define CPG_BUS_MSTOP(m)	(CPG_BUS_1_MSTOP + ((m) - 1) * 4)

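/*
 * Several CPG registers follow a write-enable convention: bits [31:16] act
 * as a write-enable mask for the corresponding data bits [15:0], so a
 * single 32-bit write can update individual bits without a read-modify-write.
 * CPG_PLL_STBY_RESETB_WEN and DDIV_DIVCTL_WEN() below, as well as the
 * CLK_ON and RST register writes further down, all rely on this pattern.
 */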
#define CPG_PLL_STBY(x)		((x))
#define CPG_PLL_STBY_RESETB	BIT(0)
#define CPG_PLL_STBY_RESETB_WEN	BIT(16)
#define CPG_PLL_CLK1(x)		((x) + 0x004)
#define CPG_PLL_CLK1_KDIV(x)	((s16)FIELD_GET(GENMASK(31, 16), (x)))
#define CPG_PLL_CLK1_MDIV(x)	FIELD_GET(GENMASK(15, 6), (x))
#define CPG_PLL_CLK1_PDIV(x)	FIELD_GET(GENMASK(5, 0), (x))
#define CPG_PLL_CLK2(x)		((x) + 0x008)
#define CPG_PLL_CLK2_SDIV(x)	FIELD_GET(GENMASK(2, 0), (x))
#define CPG_PLL_MON(x)		((x) + 0x010)
#define CPG_PLL_MON_RESETB	BIT(0)
#define CPG_PLL_MON_LOCK	BIT(4)

#define DDIV_DIVCTL_WEN(shift)		BIT((shift) + 16)

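/*
 * Each CLK_ON register controls up to 16 module clocks, so a module clock
 * ID is the number of core clocks plus (register index * 16) plus the bit
 * position within that register.
 */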
#define GET_MOD_CLK_ID(base, index, bit)		\
			((base) + ((((index) * (16))) + (bit)))

#define CPG_CLKSTATUS0		(0x700)

/**
 * struct rzv2h_cpg_priv - Clock Pulse Generator Private Data
 *
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @resets: Array of resets
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @mstop_count: Array of MSTOP reference counts
 * @rcdev: Reset controller entity
 */
struct rzv2h_cpg_priv {
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	struct rzv2h_reset *resets;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	atomic_t *mstop_count;

	struct reset_controller_dev rcdev;
};

#define rcdev_to_priv(x)	container_of(x, struct rzv2h_cpg_priv, rcdev)

struct pll_clk {
	struct rzv2h_cpg_priv *priv;
	void __iomem *base;
	struct clk_hw hw;
	struct pll pll;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)

/**
 * struct mod_clock - Module clock
 *
 * @priv: CPG private data
 * @mstop_data: mstop data relating to module clock
 * @hw: handle between common and hardware-specific interfaces
 * @no_pm: flag to indicate PM is not supported
 * @on_index: CLK_ON register index
 * @on_bit: bit position in the CLK_ON register
 * @mon_index: CLK_MON register index (negative if the clock has no monitor bit)
 * @mon_bit: monitor bit
 */
struct mod_clock {
	struct rzv2h_cpg_priv *priv;
	unsigned int mstop_data;
	struct clk_hw hw;
	bool no_pm;
	u8 on_index;
	u8 on_bit;
	s8 mon_index;
	u8 mon_bit;
};

#define to_mod_clock(_hw) container_of(_hw, struct mod_clock, hw)

/**
 * struct ddiv_clk - DDIV clock
 *
 * @priv: CPG private data
 * @div: divider clk
 * @mon: monitor bit in CPG_CLKSTATUS0 register
 */
struct ddiv_clk {
	struct rzv2h_cpg_priv *priv;
	struct clk_divider div;
	u8 mon;
};

#define to_ddiv_clock(_div) container_of(_div, struct ddiv_clk, div)

static int rzv2h_cpg_pll_clk_is_enabled(struct clk_hw *hw)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzv2h_cpg_priv *priv = pll_clk->priv;
	u32 val = readl(priv->base + CPG_PLL_MON(pll_clk->pll.offset));

	/* Ensure both RESETB and LOCK bits are set */
	return (val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) ==
	       (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK);
}

static int rzv2h_cpg_pll_clk_enable(struct clk_hw *hw)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzv2h_cpg_priv *priv = pll_clk->priv;
	struct pll pll = pll_clk->pll;
	u32 stby_offset;
	u32 mon_offset;
	u32 val;
	int ret;

	if (rzv2h_cpg_pll_clk_is_enabled(hw))
		return 0;

	stby_offset = CPG_PLL_STBY(pll.offset);
	mon_offset = CPG_PLL_MON(pll.offset);

	writel(CPG_PLL_STBY_RESETB_WEN | CPG_PLL_STBY_RESETB,
	       priv->base + stby_offset);

	/*
	 * Ensure the PLL enters normal mode.
	 *
	 * Note: there is no hardware information about the worst-case latency.
	 *
	 * Since this latency might depend on the external crystal or the PLL
	 * rate, use a "super" safe timeout value.
	 */
	ret = readl_poll_timeout_atomic(priv->base + mon_offset, val,
			(val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) ==
			(CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK), 200, 2000);
	if (ret)
		dev_err(priv->dev, "Failed to enable PLL 0x%x/%pC\n",
			stby_offset, hw->clk);

	return ret;
}

static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzv2h_cpg_priv *priv = pll_clk->priv;
	struct pll pll = pll_clk->pll;
	unsigned int clk1, clk2;
	u64 rate;

	if (!pll.has_clkn)
		return 0;

	clk1 = readl(priv->base + CPG_PLL_CLK1(pll.offset));
	clk2 = readl(priv->base + CPG_PLL_CLK2(pll.offset));

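	/*
	 * With KDIV as a signed 16-bit fractional term, the resulting rate is
	 * Fout = Fin * (MDIV + KDIV / 2^16) / (PDIV * 2^SDIV); the 64-bit
	 * multiply-and-shift below avoids intermediate overflow.
	 */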
	rate = mul_u64_u32_shr(parent_rate, (CPG_PLL_CLK1_MDIV(clk1) << 16) +
			       CPG_PLL_CLK1_KDIV(clk1), 16 + CPG_PLL_CLK2_SDIV(clk2));

	return DIV_ROUND_CLOSEST_ULL(rate, CPG_PLL_CLK1_PDIV(clk1));
}

static const struct clk_ops rzv2h_cpg_pll_ops = {
	.is_enabled = rzv2h_cpg_pll_clk_is_enabled,
	.enable = rzv2h_cpg_pll_clk_enable,
	.recalc_rate = rzv2h_cpg_pll_clk_recalc_rate,
};

static struct clk * __init
rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct rzv2h_cpg_priv *priv,
			   const struct clk_ops *ops)
{
	void __iomem *base = priv->base;
	struct device *dev = priv->dev;
	struct clk_init_data init;
	const struct clk *parent;
	const char *parent_name;
	struct pll_clk *pll_clk;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->pll = core->cfg.pll;
	pll_clk->base = base;
	pll_clk->priv = priv;

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret)
		return ERR_PTR(ret);

	return pll_clk->hw.clk;
}

static unsigned long rzv2h_ddiv_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int val;

	val = readl(divider->reg) >> divider->shift;
	val &= clk_div_mask(divider->width);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

static long rzv2h_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *prate)
{
	struct clk_divider *divider = to_clk_divider(hw);

	return divider_round_rate(hw, rate, prate, divider->table,
				  divider->width, divider->flags);
}

static int rzv2h_ddiv_determine_rate(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	struct clk_divider *divider = to_clk_divider(hw);

	return divider_determine_rate(hw, req, divider->table, divider->width,
				      divider->flags);
}

static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon)
{
	u32 bitmask = BIT(mon);
	u32 val;

	if (mon == CSDIV_NO_MON)
		return 0;

	return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val, !(val & bitmask), 10, 200);
}

static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	struct ddiv_clk *ddiv = to_ddiv_clock(divider);
	struct rzv2h_cpg_priv *priv = ddiv->priv;
	unsigned long flags = 0;
	int value;
	u32 val;
	int ret;

	value = divider_get_val(rate, parent_rate, divider->table,
				divider->width, divider->flags);
	if (value < 0)
		return value;

	spin_lock_irqsave(divider->lock, flags);

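	/* Wait for any in-flight divider update to finish before reprogramming */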
	ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
	if (ret)
		goto ddiv_timeout;

	val = readl(divider->reg) | DDIV_DIVCTL_WEN(divider->shift);
	val &= ~(clk_div_mask(divider->width) << divider->shift);
	val |= (u32)value << divider->shift;
	writel(val, divider->reg);

	ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);

ddiv_timeout:
	spin_unlock_irqrestore(divider->lock, flags);
	return ret;
}

static const struct clk_ops rzv2h_ddiv_clk_divider_ops = {
	.recalc_rate = rzv2h_ddiv_recalc_rate,
	.round_rate = rzv2h_ddiv_round_rate,
	.determine_rate = rzv2h_ddiv_determine_rate,
	.set_rate = rzv2h_ddiv_set_rate,
};

static struct clk * __init
rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
			    struct rzv2h_cpg_priv *priv)
{
	struct ddiv cfg_ddiv = core->cfg.ddiv;
	struct clk_init_data init = {};
	struct device *dev = priv->dev;
	u8 shift = cfg_ddiv.shift;
	u8 width = cfg_ddiv.width;
	const struct clk *parent;
	const char *parent_name;
	struct clk_divider *div;
	struct ddiv_clk *ddiv;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	if ((shift + width) > 16)
		return ERR_PTR(-EINVAL);

	ddiv = devm_kzalloc(priv->dev, sizeof(*ddiv), GFP_KERNEL);
	if (!ddiv)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	if (cfg_ddiv.no_rmw)
		init.ops = &clk_divider_ops;
	else
		init.ops = &rzv2h_ddiv_clk_divider_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	ddiv->priv = priv;
	ddiv->mon = cfg_ddiv.monbit;
	div = &ddiv->div;
	div->reg = priv->base + cfg_ddiv.offset;
	div->shift = shift;
	div->width = width;
	div->flags = core->flag;
	div->lock = &priv->rmw_lock;
	div->hw.init = &init;
	div->table = core->dtable;

	ret = devm_clk_hw_register(dev, &div->hw);
	if (ret)
		return ERR_PTR(ret);

	return div->hw.clk;
}

static struct clk * __init
rzv2h_cpg_mux_clk_register(const struct cpg_core_clk *core,
			   struct rzv2h_cpg_priv *priv)
{
	struct smuxed mux = core->cfg.smux;
	const struct clk_hw *clk_hw;

	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
					  core->parent_names, core->num_parents,
					  core->flag, priv->base + mux.offset,
					  mux.shift, mux.width,
					  core->mux_flags, &priv->rmw_lock);
	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

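/*
 * Clock specifiers for this provider use two cells, as in the Renesas
 * CPG/MSSR binding: the first cell selects CPG_CORE or CPG_MOD, the second
 * holds the clock index, e.g. clocks = <&cpg CPG_MOD 4> (illustrative
 * index only).
 */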
static struct clk
*rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
			       void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct rzv2h_cpg_priv *priv = data;
	struct device *dev = priv->dev;
	const char *type;
	struct clk *clk;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + clkidx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld\n", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}

static void __init
rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
			    struct rzv2h_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	unsigned int id = core->id, div = core->div;
	struct device *dev = priv->dev;
	const char *parent_name;
	struct clk_hw *clk_hw;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name,
							   parent_name, CLK_SET_RATE_PARENT,
							   core->mult, div);
		if (IS_ERR(clk_hw))
			clk = ERR_CAST(clk_hw);
		else
			clk = clk_hw->clk;
		break;
	case CLK_TYPE_PLL:
		clk = rzv2h_cpg_pll_clk_register(core, priv, &rzv2h_cpg_pll_ops);
		break;
	case CLK_TYPE_DDIV:
		clk = rzv2h_cpg_ddiv_clk_register(core, priv);
		break;
	case CLK_TYPE_SMUX:
		clk = rzv2h_cpg_mux_clk_register(core, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register core clock %s: %ld\n",
		core->name, PTR_ERR(clk));
}

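/*
 * Each CPG_BUS_m_MSTOP register gates up to 16 bus interfaces, and several
 * module clocks may share one interface. The per-bit atomic counters in
 * priv->mstop_count track how many enabled module clocks need a given bus:
 * the first user clears the MSTOP bit (write-enable in the upper 16 bits,
 * data bit zero) and the last user sets it again.
 */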
static void rzv2h_mod_clock_mstop_enable(struct rzv2h_cpg_priv *priv,
					 u32 mstop_data)
{
	unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
	u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
	atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
	unsigned long flags;
	unsigned int i;
	u32 val = 0;

	spin_lock_irqsave(&priv->rmw_lock, flags);
	for_each_set_bit(i, &mstop_mask, 16) {
		if (!atomic_read(&mstop[i]))
			val |= BIT(i) << 16;
		atomic_inc(&mstop[i]);
	}
	if (val)
		writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
	spin_unlock_irqrestore(&priv->rmw_lock, flags);
}

static void rzv2h_mod_clock_mstop_disable(struct rzv2h_cpg_priv *priv,
					  u32 mstop_data)
{
	unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
	u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
	atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
	unsigned long flags;
	unsigned int i;
	u32 val = 0;

	spin_lock_irqsave(&priv->rmw_lock, flags);
	for_each_set_bit(i, &mstop_mask, 16) {
		if (!atomic_read(&mstop[i]) ||
		    atomic_dec_and_test(&mstop[i]))
			val |= BIT(i) << 16 | BIT(i);
	}
	if (val)
		writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
	spin_unlock_irqrestore(&priv->rmw_lock, flags);
}

static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
{
	struct mod_clock *clock = to_mod_clock(hw);
	struct rzv2h_cpg_priv *priv = clock->priv;
	u32 bitmask;
	u32 offset;

	if (clock->mon_index >= 0) {
		offset = GET_CLK_MON_OFFSET(clock->mon_index);
		bitmask = BIT(clock->mon_bit);

		if (!(readl(priv->base + offset) & bitmask))
			return 0;
	}

	offset = GET_CLK_ON_OFFSET(clock->on_index);
	bitmask = BIT(clock->on_bit);

	return readl(priv->base + offset) & bitmask;
}

static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	bool enabled = rzv2h_mod_clock_is_enabled(hw);
	struct mod_clock *clock = to_mod_clock(hw);
	unsigned int reg = GET_CLK_ON_OFFSET(clock->on_index);
	struct rzv2h_cpg_priv *priv = clock->priv;
	u32 bitmask = BIT(clock->on_bit);
	struct device *dev = priv->dev;
	u32 value;
	int error;

	dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk,
		str_on_off(enable));

	if (enabled == enable)
		return 0;

	value = bitmask << 16;
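	/*
	 * Order matters: on enable, the clock is turned on before its buses
	 * are released from MSTOP; on disable, the buses are stopped first.
	 */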
	if (enable) {
		value |= bitmask;
		writel(value, priv->base + reg);
		if (clock->mstop_data != BUS_MSTOP_NONE)
			rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data);
	} else {
		if (clock->mstop_data != BUS_MSTOP_NONE)
			rzv2h_mod_clock_mstop_disable(priv, clock->mstop_data);
		writel(value, priv->base + reg);
	}

	if (!enable || clock->mon_index < 0)
		return 0;

	reg = GET_CLK_MON_OFFSET(clock->mon_index);
	bitmask = BIT(clock->mon_bit);
	error = readl_poll_timeout_atomic(priv->base + reg, value,
					  value & bitmask, 0, 10);
	if (error)
		dev_err(dev, "Failed to enable CLK_ON 0x%x/%pC\n",
			GET_CLK_ON_OFFSET(clock->on_index), hw->clk);

	return error;
}

static int rzv2h_mod_clock_enable(struct clk_hw *hw)
{
	return rzv2h_mod_clock_endisable(hw, true);
}

static void rzv2h_mod_clock_disable(struct clk_hw *hw)
{
	rzv2h_mod_clock_endisable(hw, false);
}

static const struct clk_ops rzv2h_mod_clock_ops = {
	.enable = rzv2h_mod_clock_enable,
	.disable = rzv2h_mod_clock_disable,
	.is_enabled = rzv2h_mod_clock_is_enabled,
};

static void __init
rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
			   struct rzv2h_cpg_priv *priv)
{
	struct mod_clock *clock = NULL;
	struct device *dev = priv->dev;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int id;
	int ret;

	id = GET_MOD_CLK_ID(priv->num_core_clks, mod->on_index, mod->on_bit);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzv2h_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	if (mod->critical)
		init.flags |= CLK_IS_CRITICAL;

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->on_index = mod->on_index;
	clock->on_bit = mod->on_bit;
	clock->mon_index = mod->mon_index;
	clock->mon_bit = mod->mon_bit;
	clock->no_pm = mod->no_pm;
	clock->priv = priv;
	clock->hw.init = &init;
	clock->mstop_data = mod->mstop_data;

	ret = devm_clk_hw_register(dev, &clock->hw);
	if (ret) {
		clk = ERR_PTR(ret);
		goto fail;
	}

	priv->clks[id] = clock->hw.clk;

	/*
	 * Ensure the module clocks and MSTOP bits are synchronized when they are
	 * turned ON by the bootloader. Enable MSTOP bits for module clocks that were
	 * turned ON in an earlier boot stage.
	 */
	if (clock->mstop_data != BUS_MSTOP_NONE &&
	    !mod->critical && rzv2h_mod_clock_is_enabled(&clock->hw)) {
		rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data);
	} else if (clock->mstop_data != BUS_MSTOP_NONE && mod->critical) {
		unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, clock->mstop_data);
		u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, clock->mstop_data);
		atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
		unsigned long flags;
		unsigned int i;
		u32 val = 0;

		/*
		 * Critical clocks are turned ON immediately upon registration, and the
		 * MSTOP counter is updated through the rzv2h_mod_clock_enable() path.
		 * However, if the critical clocks were already turned ON by the initial
		 * bootloader, synchronize the atomic counter here and clear the MSTOP bit.
		 */
		spin_lock_irqsave(&priv->rmw_lock, flags);
		for_each_set_bit(i, &mstop_mask, 16) {
			if (atomic_read(&mstop[i]))
				continue;
			val |= BIT(i) << 16;
			atomic_inc(&mstop[i]);
		}
		if (val)
			writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
	}

	return;

fail:
	dev_err(dev, "Failed to register module clock %s: %ld\n",
		mod->name, PTR_ERR(clk));
}

static int __rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
			      unsigned long id, bool assert)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
	u32 mask = BIT(priv->resets[id].reset_bit);
	u8 monbit = priv->resets[id].mon_bit;
	u32 value = mask << 16;

	dev_dbg(rcdev->dev, "%s id:%ld offset:0x%x\n",
		assert ? "assert" : "deassert", id, reg);

	if (!assert)
		value |= mask;
	writel(value, priv->base + reg);

	reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	mask = BIT(monbit);

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 assert ? (value & mask) : !(value & mask),
					 10, 200);
}

static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	return __rzv2h_cpg_assert(rcdev, id, true);
}

static int rzv2h_cpg_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	return __rzv2h_cpg_assert(rcdev, id, false);
}

static int rzv2h_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	int ret;

	ret = rzv2h_cpg_assert(rcdev, id);
	if (ret)
		return ret;

	return rzv2h_cpg_deassert(rcdev, id);
}

static int rzv2h_cpg_status(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	u8 monbit = priv->resets[id].mon_bit;

	return !!(readl(priv->base + reg) & BIT(monbit));
}

static const struct reset_control_ops rzv2h_cpg_reset_ops = {
	.reset = rzv2h_cpg_reset,
	.assert = rzv2h_cpg_assert,
	.deassert = rzv2h_cpg_deassert,
	.status = rzv2h_cpg_status,
};

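/*
 * Reset specifiers use a single cell holding (register index * 16 + bit);
 * map it back to the corresponding position in the resets[] array.
 */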
static int rzv2h_cpg_reset_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int id = reset_spec->args[0];
	u8 rst_index = id / 16;
	u8 rst_bit = id % 16;
	unsigned int i;

	for (i = 0; i < rcdev->nr_resets; i++) {
		if (rst_index == priv->resets[i].reset_index &&
		    rst_bit == priv->resets[i].reset_bit)
			return i;
	}

	return -EINVAL;
}

static int rzv2h_cpg_reset_controller_register(struct rzv2h_cpg_priv *priv)
{
	priv->rcdev.ops = &rzv2h_cpg_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.dev = priv->dev;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = rzv2h_cpg_reset_xlate;
	priv->rcdev.nr_resets = priv->num_resets;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

/**
 * struct rzv2h_cpg_pd - RZ/V2H power domain data structure
 * @priv: pointer to CPG private data structure
 * @genpd: generic PM domain
 */
struct rzv2h_cpg_pd {
	struct rzv2h_cpg_priv *priv;
	struct generic_pm_domain genpd;
};

static bool rzv2h_cpg_is_pm_clk(struct rzv2h_cpg_pd *pd,
				const struct of_phandle_args *clkspec)
{
	if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
		return false;

	switch (clkspec->args[0]) {
	case CPG_MOD: {
		struct rzv2h_cpg_priv *priv = pd->priv;
		unsigned int id = clkspec->args[1];
		struct mod_clock *clock;

		if (id >= priv->num_mod_clks)
			return false;

		if (priv->clks[priv->num_core_clks + id] == ERR_PTR(-ENOENT))
			return false;

		clock = to_mod_clock(__clk_get_hw(priv->clks[priv->num_core_clks + id]));

		return !clock->no_pm;
	}

	case CPG_CORE:
	default:
		return false;
	}
}

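/*
 * Walk the consumer's "clocks" phandles and register every module clock
 * that supports PM with pm_clk, so Runtime PM can gate it per device.
 */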
static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	struct rzv2h_cpg_pd *pd = container_of(domain, struct rzv2h_cpg_pd, genpd);
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	unsigned int i;
	int error;

	for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
		if (!rzv2h_cpg_is_pm_clk(pd, &clkspec)) {
			of_node_put(clkspec.np);
			continue;
		}

		if (once) {
			once = false;
			error = pm_clk_create(dev);
			if (error) {
				of_node_put(clkspec.np);
				goto err;
			}
		}
		clk = of_clk_get_from_provider(&clkspec);
		of_node_put(clkspec.np);
		if (IS_ERR(clk)) {
			error = PTR_ERR(clk);
			goto fail_destroy;
		}

		error = pm_clk_add_clk(dev, clk);
		if (error) {
			dev_err(dev, "pm_clk_add_clk failed %d\n",
				error);
			goto fail_put;
		}
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}

static void rzv2h_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static void rzv2h_cpg_genpd_remove_simple(void *data)
{
	pm_genpd_remove(data);
}

static int __init rzv2h_cpg_add_pm_domains(struct rzv2h_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzv2h_cpg_pd *pd;
	int ret;

	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->genpd.name = np->name;
	pd->priv = priv;
	pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
	pd->genpd.attach_dev = rzv2h_cpg_attach_dev;
	pd->genpd.detach_dev = rzv2h_cpg_detach_dev;
	ret = pm_genpd_init(&pd->genpd, &pm_domain_always_on_gov, false);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, rzv2h_cpg_genpd_remove_simple, &pd->genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, &pd->genpd);
}

static void rzv2h_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

static int __init rzv2h_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzv2h_cpg_info *info;
	struct rzv2h_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->rmw_lock);

	priv->dev = dev;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	priv->mstop_count = devm_kcalloc(dev, info->num_mstop_bits,
					 sizeof(*priv->mstop_count), GFP_KERNEL);
	if (!priv->mstop_count)
		return -ENOMEM;

	/* Adjust for CPG_BUS_m_MSTOP starting from m = 1 */
	priv->mstop_count -= 16;

	priv->resets = devm_kmemdup(dev, info->resets, sizeof(*info->resets) *
				    info->num_resets, GFP_KERNEL);
	if (!priv->resets)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->last_dt_core_clk = info->last_dt_core_clk;
	priv->num_resets = info->num_resets;

	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzv2h_cpg_register_core_clk(&info->core_clks[i], priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzv2h_cpg_register_mod_clk(&info->mod_clks[i], priv);

	error = of_clk_add_provider(np, rzv2h_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzv2h_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzv2h_cpg_add_pm_domains(priv);
	if (error)
		return error;

	error = rzv2h_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}

static const struct of_device_id rzv2h_cpg_match[] = {
#ifdef CONFIG_CLK_R9A09G047
	{
		.compatible = "renesas,r9a09g047-cpg",
		.data = &r9a09g047_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G056
	{
		.compatible = "renesas,r9a09g056-cpg",
		.data = &r9a09g056_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G057
	{
		.compatible = "renesas,r9a09g057-cpg",
		.data = &r9a09g057_cpg_info,
	},
#endif
	{ /* sentinel */ }
};

static struct platform_driver rzv2h_cpg_driver = {
	.driver		= {
		.name	= "rzv2h-cpg",
		.of_match_table = rzv2h_cpg_match,
	},
};

static int __init rzv2h_cpg_init(void)
{
	return platform_driver_probe(&rzv2h_cpg_driver, rzv2h_cpg_probe);
}

subsys_initcall(rzv2h_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/V2H CPG Driver");