// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RZ/V2H(P) Clock Pulse Generator
 *
 * Copyright (C) 2024 Renesas Electronics Corp.
 *
 * Based on rzg2l-cpg.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/limits.h>
#include <linux/math.h>
#include <linux/math64.h>
#include <linux/minmax.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/refcount.h>
#include <linux/reset-controller.h>
#include <linux/string_choices.h>
#include <linux/units.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "rzv2h-cpg.h"

#ifdef DEBUG
#define WARN_DEBUG(x)		WARN_ON(x)
#else
#define WARN_DEBUG(x)		do { } while (0)
#endif

#define GET_CLK_ON_OFFSET(x)	(0x600 + ((x) * 4))
#define GET_CLK_MON_OFFSET(x)	(0x800 + ((x) * 4))
#define GET_RST_OFFSET(x)	(0x900 + ((x) * 4))
#define GET_RST_MON_OFFSET(x)	(0xA00 + ((x) * 4))

#define CPG_BUS_1_MSTOP		(0xd00)
#define CPG_BUS_MSTOP(m)	(CPG_BUS_1_MSTOP + ((m) - 1) * 4)
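
/*
 * Illustrative expansion (values chosen only as an example):
 * CPG_BUS_MSTOP(1) = 0xd00 and CPG_BUS_MSTOP(2) = 0xd04, i.e. one 32-bit
 * MSTOP register per bus index m, starting from m = 1.
 */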

#define CPG_PLL_STBY(x)		((x))
#define CPG_PLL_STBY_RESETB	BIT(0)
#define CPG_PLL_STBY_SSC_EN	BIT(2)
#define CPG_PLL_STBY_RESETB_WEN	BIT(16)
#define CPG_PLL_STBY_SSC_EN_WEN BIT(18)
#define CPG_PLL_CLK1(x)		((x) + 0x004)
#define CPG_PLL_CLK1_KDIV	GENMASK(31, 16)
#define CPG_PLL_CLK1_MDIV	GENMASK(15, 6)
#define CPG_PLL_CLK1_PDIV	GENMASK(5, 0)
#define CPG_PLL_CLK2(x)		((x) + 0x008)
#define CPG_PLL_CLK2_SDIV	GENMASK(2, 0)
#define CPG_PLL_MON(x)		((x) + 0x010)
#define CPG_PLL_MON_RESETB	BIT(0)
#define CPG_PLL_MON_LOCK	BIT(4)

#define DDIV_DIVCTL_WEN(shift)		BIT((shift) + 16)

#define GET_MOD_CLK_ID(base, index, bit)		\
			((base) + ((((index) * (16))) + (bit)))
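
/*
 * Illustrative expansion (numbers chosen only as an example):
 * GET_MOD_CLK_ID(base, 1, 3) = base + 1 * 16 + 3, i.e. module clock IDs
 * are laid out 16 per CLK_ON register, one per ON bit.
 */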

#define CPG_CLKSTATUS0		(0x700)

/* On RZ/G3E SoC we have two DSI PLLs */
#define MAX_CPG_DSI_PLL		2

/**
 * struct rzv2h_pll_dsi_info - PLL DSI information, holds the limits and parameters
 *
 * @pll_dsi_limits: PLL DSI parameter limits
 * @pll_dsi_parameters: Calculated PLL DSI parameters
 * @req_pll_dsi_rate: Requested PLL DSI rate
 */
struct rzv2h_pll_dsi_info {
	const struct rzv2h_pll_limits *pll_dsi_limits;
	struct rzv2h_pll_div_pars pll_dsi_parameters;
	unsigned long req_pll_dsi_rate;
};

/**
 * struct rzv2h_cpg_priv - Clock Pulse Generator Private Data
 *
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @resets: Array of resets
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @ff_mod_status_ops: Fixed Factor Module Status Clock operations
 * @mstop_count: Array of mstop values
 * @rcdev: Reset controller entity
 * @pll_dsi_info: Array of PLL DSI information, holds the limits and parameters
 */
struct rzv2h_cpg_priv {
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	struct rzv2h_reset *resets;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	struct clk_ops *ff_mod_status_ops;

	atomic_t *mstop_count;

	struct reset_controller_dev rcdev;

	struct rzv2h_pll_dsi_info pll_dsi_info[MAX_CPG_DSI_PLL];
};

#define rcdev_to_priv(x)	container_of(x, struct rzv2h_cpg_priv, rcdev)

struct pll_clk {
	struct rzv2h_cpg_priv *priv;
	struct clk_hw hw;
	struct pll pll;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)

/**
 * struct mod_clock - Module clock
 *
 * @priv: CPG private data
 * @mstop_data: mstop data relating to module clock
 * @hw: handle between common and hardware-specific interfaces
 * @no_pm: flag to indicate PM is not supported
 * @on_index: register offset
 * @on_bit: ON/MON bit
 * @mon_index: monitor register offset
 * @mon_bit: monitor bit
 * @ext_clk_mux_index: mux index for external clock source, or -1 if internal
 */
struct mod_clock {
	struct rzv2h_cpg_priv *priv;
	unsigned int mstop_data;
	struct clk_hw hw;
	bool no_pm;
	u8 on_index;
	u8 on_bit;
	s8 mon_index;
	u8 mon_bit;
	s8 ext_clk_mux_index;
};

#define to_mod_clock(_hw) container_of(_hw, struct mod_clock, hw)

/**
 * struct ddiv_clk - DDIV clock
 *
 * @priv: CPG private data
 * @div: divider clk
 * @mon: monitor bit in CPG_CLKSTATUS0 register
 */
struct ddiv_clk {
	struct rzv2h_cpg_priv *priv;
	struct clk_divider div;
	u8 mon;
};

#define to_ddiv_clock(_div) container_of(_div, struct ddiv_clk, div)

/**
 * struct rzv2h_ff_mod_status_clk - Fixed Factor Module Status Clock
 *
 * @priv: CPG private data
 * @conf: fixed mod configuration
 * @fix: fixed factor clock
 */
struct rzv2h_ff_mod_status_clk {
	struct rzv2h_cpg_priv *priv;
	struct fixed_mod_conf conf;
	struct clk_fixed_factor fix;
};

#define to_rzv2h_ff_mod_status_clk(_hw) \
	container_of(_hw, struct rzv2h_ff_mod_status_clk, fix.hw)

/**
 * struct rzv2h_plldsi_div_clk - PLL DSI DDIV clock
 *
 * @dtable: divider table
 * @priv: CPG private data
 * @hw: divider clk
 * @ddiv: divider configuration
 */
struct rzv2h_plldsi_div_clk {
	const struct clk_div_table *dtable;
	struct rzv2h_cpg_priv *priv;
	struct clk_hw hw;
	struct ddiv ddiv;
};

#define to_plldsi_div_clk(_hw) \
	container_of(_hw, struct rzv2h_plldsi_div_clk, hw)

#define RZ_V2H_OSC_CLK_IN_MEGA		(24 * MEGA)
#define RZV2H_MAX_DIV_TABLES		(16)

/**
 * rzv2h_get_pll_pars - Finds the best combination of PLL parameters
 * for a given frequency.
 *
 * @limits: Pointer to the structure containing the limits for the PLL parameters
 * @pars: Pointer to the structure where the best calculated PLL parameter values
 * will be stored
 * @freq_millihz: Target output frequency in millihertz
 *
 * This function calculates the best set of PLL parameters (M, K, P, S) to achieve
 * the desired frequency.
 * There is no direct formula to calculate the PLL parameters, as they form an
 * open system of equations; this function therefore uses an iterative approach
 * to determine the best solution. The best solution is one that minimizes the
 * error (desired frequency - actual frequency).
 *
 * Return: true if a valid set of parameter values is found, false otherwise.
 */
bool rzv2h_get_pll_pars(const struct rzv2h_pll_limits *limits,
			struct rzv2h_pll_pars *pars, u64 freq_millihz)
{
	u64 fout_min_millihz = mul_u32_u32(limits->fout.min, MILLI);
	u64 fout_max_millihz = mul_u32_u32(limits->fout.max, MILLI);
	struct rzv2h_pll_pars p, best;

	if (freq_millihz > fout_max_millihz ||
	    freq_millihz < fout_min_millihz)
		return false;

	/* Initialize best error to maximum possible value */
	best.error_millihz = S64_MAX;

	for (p.p = limits->p.min; p.p <= limits->p.max; p.p++) {
		u32 fref = RZ_V2H_OSC_CLK_IN_MEGA / p.p;
		u16 divider;

		for (divider = 1 << limits->s.min, p.s = limits->s.min;
			p.s <= limits->s.max; p.s++, divider <<= 1) {
			for (p.m = limits->m.min; p.m <= limits->m.max; p.m++) {
				u64 output_m, output_k_range;
				s64 pll_k, output_k;
				u64 fvco, output;

				/*
				 * The frequency generated by the PLL + divider
				 * is calculated as follows:
				 *
				 * With:
				 * Freq = Ffout = Ffvco / 2^(pll_s)
				 * Ffvco = (pll_m + (pll_k / 65536)) * Ffref
				 * Ffref = 24MHz / pll_p
				 *
				 * Freq can also be rewritten as:
				 * Freq = Ffvco / 2^(pll_s)
				 *      = ((pll_m + (pll_k / 65536)) * Ffref) / 2^(pll_s)
				 *      = (pll_m * Ffref) / 2^(pll_s) + ((pll_k / 65536) * Ffref) / 2^(pll_s)
				 *      = output_m + output_k
				 *
				 * Every parameter has been determined at this
				 * point, but pll_k.
				 *
				 * Considering that:
				 * limits->k.min <= pll_k <= limits->k.max
				 * Then:
				 * -0.5 <= (pll_k / 65536) < 0.5
				 * Therefore:
				 * -Ffref / (2 * 2^(pll_s)) <= output_k < Ffref / (2 * 2^(pll_s))
				 */
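
				/*
				 * Worked example (values assumed purely for
				 * illustration): with pll_p = 1, pll_m = 100,
				 * pll_k = 0 and pll_s = 1: Ffref = 24 MHz,
				 * Ffvco = 100 * 24 MHz = 2400 MHz and
				 * Ffout = 2400 MHz / 2^1 = 1200 MHz.
				 */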

				/* Compute output M component (in mHz) */
				output_m = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(p.m, fref) * MILLI,
								 divider);
				/* Compute range for output K (in mHz) */
				output_k_range = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(fref, MILLI),
								       2 * divider);
				/*
				 * No point in continuing if we can't achieve
				 * the desired frequency
				 */
				if (freq_millihz <  (output_m - output_k_range) ||
				    freq_millihz >= (output_m + output_k_range)) {
					continue;
				}

				/*
				 * Compute the K component
				 *
				 * Since:
				 * Freq = output_m + output_k
				 * Then:
				 * output_k = Freq - output_m
				 *          = ((pll_k / 65536) * Ffref) / 2^(pll_s)
				 * Therefore:
				 * pll_k = (output_k * 65536 * 2^(pll_s)) / Ffref
				 */
				output_k = freq_millihz - output_m;
				pll_k = div_s64(output_k * 65536ULL * divider,
						fref);
				pll_k = DIV_S64_ROUND_CLOSEST(pll_k, MILLI);

				/* Validate K value within allowed limits */
				if (pll_k < limits->k.min ||
				    pll_k > limits->k.max)
					continue;

				p.k = pll_k;

				/* Compute (Ffvco * 65536) */
				fvco = mul_u32_u32(p.m * 65536 + p.k, fref);
				if (fvco < mul_u32_u32(limits->fvco.min, 65536) ||
				    fvco > mul_u32_u32(limits->fvco.max, 65536))
					continue;

				/* PLL_M component of (output * 65536 * PLL_P) */
				output = mul_u32_u32(p.m * 65536, RZ_V2H_OSC_CLK_IN_MEGA);
				/* PLL_K component of (output * 65536 * PLL_P) */
				output += p.k * RZ_V2H_OSC_CLK_IN_MEGA;
				/* Make it in mHz */
				output *= MILLI;
				output = DIV_U64_ROUND_CLOSEST(output, 65536 * p.p * divider);

				/* Check output frequency against limits */
				if (output < fout_min_millihz ||
				    output > fout_max_millihz)
					continue;

				p.error_millihz = freq_millihz - output;
				p.freq_millihz = output;

				/* If an exact match is found, return immediately */
				if (p.error_millihz == 0) {
					*pars = p;
					return true;
				}

				/* Update best match if error is smaller */
				if (abs(best.error_millihz) > abs(p.error_millihz))
					best = p;
			}
		}
	}

	/* If no valid parameters were found, return false */
	if (best.error_millihz == S64_MAX)
		return false;

	*pars = best;
	return true;
}
EXPORT_SYMBOL_NS_GPL(rzv2h_get_pll_pars, "RZV2H_CPG");
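
/*
 * Minimal usage sketch for rzv2h_get_pll_pars() (hypothetical caller and
 * values, not part of this driver):
 *
 *	struct rzv2h_pll_pars pars;
 *	u64 target = 1188 * HZ_PER_MHZ * (u64)MILLI;	(1188 MHz in mHz)
 *
 *	if (rzv2h_get_pll_pars(limits, &pars, target))
 *		program the PLL with pars.m, pars.k, pars.p and pars.s;
 *
 * On success, pars.freq_millihz holds the closest achievable frequency and
 * pars.error_millihz the residual error.
 */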

/*
 * rzv2h_get_pll_divs_pars - Finds the best combination of PLL parameters
 * and divider value for a given frequency.
 *
 * @limits: Pointer to the structure containing the limits for the PLL parameters
 * @pars: Pointer to the structure where the best calculated PLL parameters and
 * divider values will be stored
 * @table: Pointer to the array of valid divider values
 * @table_size: Size of the divider values array
 * @freq_millihz: Target output frequency in millihertz
 *
 * This function calculates the best set of PLL parameters (M, K, P, S) and divider
 * value to achieve the desired frequency. See rzv2h_get_pll_pars() for more details
 * on how the PLL parameters are calculated.
 *
 * freq_millihz is the desired frequency generated by the PLL followed by a
 * gear.
 */
bool rzv2h_get_pll_divs_pars(const struct rzv2h_pll_limits *limits,
			     struct rzv2h_pll_div_pars *pars,
			     const u8 *table, u8 table_size, u64 freq_millihz)
{
	struct rzv2h_pll_div_pars p, best;

	best.div.error_millihz = S64_MAX;
	p.div.error_millihz = S64_MAX;
	for (unsigned int i = 0; i < table_size; i++) {
		if (!rzv2h_get_pll_pars(limits, &p.pll, freq_millihz * table[i]))
			continue;

		p.div.divider_value = table[i];
		p.div.freq_millihz = DIV_U64_ROUND_CLOSEST(p.pll.freq_millihz, table[i]);
		p.div.error_millihz = freq_millihz - p.div.freq_millihz;

		if (p.div.error_millihz == 0) {
			*pars = p;
			return true;
		}

		if (abs(best.div.error_millihz) > abs(p.div.error_millihz))
			best = p;
	}

	if (best.div.error_millihz == S64_MAX)
		return false;

	*pars = best;
	return true;
}
EXPORT_SYMBOL_NS_GPL(rzv2h_get_pll_divs_pars, "RZV2H_CPG");
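
/*
 * Illustrative call (divider table values assumed for illustration only):
 *
 *	static const u8 table[] = { 1, 2, 4, 8 };
 *	struct rzv2h_pll_div_pars pars;
 *
 *	rzv2h_get_pll_divs_pars(limits, &pars, table, ARRAY_SIZE(table),
 *				target_millihz);
 *
 * For each divider d the helper asks rzv2h_get_pll_pars() for target * d
 * and keeps the (PLL, divider) pair with the smallest error.
 */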

static unsigned long rzv2h_cpg_plldsi_div_recalc_rate(struct clk_hw *hw,
						      unsigned long parent_rate)
{
	struct rzv2h_plldsi_div_clk *dsi_div = to_plldsi_div_clk(hw);
	struct rzv2h_cpg_priv *priv = dsi_div->priv;
	struct ddiv ddiv = dsi_div->ddiv;
	u32 div;

	div = readl(priv->base + ddiv.offset);
	div >>= ddiv.shift;
	div &= clk_div_mask(ddiv.width);
	div = dsi_div->dtable[div].div;

	return DIV_ROUND_CLOSEST_ULL(parent_rate, div);
}

static int rzv2h_cpg_plldsi_div_determine_rate(struct clk_hw *hw,
					       struct clk_rate_request *req)
{
	struct rzv2h_plldsi_div_clk *dsi_div = to_plldsi_div_clk(hw);
	struct pll_clk *pll_clk = to_pll(clk_hw_get_parent(hw));
	struct rzv2h_cpg_priv *priv = dsi_div->priv;
	u8 table[RZV2H_MAX_DIV_TABLES] = { 0 };
	struct rzv2h_pll_div_pars *dsi_params;
	struct rzv2h_pll_dsi_info *dsi_info;
	const struct clk_div_table *div;
	unsigned int i = 0;
	u64 rate_millihz;

	dsi_info = &priv->pll_dsi_info[pll_clk->pll.instance];
	dsi_params = &dsi_info->pll_dsi_parameters;

	rate_millihz = mul_u32_u32(req->rate, MILLI);
	if (rate_millihz == dsi_params->div.error_millihz + dsi_params->div.freq_millihz)
		goto exit_determine_rate;

	for (div = dsi_div->dtable; div->div; div++) {
		if (i >= RZV2H_MAX_DIV_TABLES)
			return -EINVAL;
		table[i++] = div->div;
	}

	if (!rzv2h_get_pll_divs_pars(dsi_info->pll_dsi_limits, dsi_params, table, i,
				     rate_millihz)) {
		dev_err(priv->dev, "failed to determine rate for req->rate: %lu\n",
			req->rate);
		return -EINVAL;
	}

exit_determine_rate:
	req->rate = DIV_ROUND_CLOSEST_ULL(dsi_params->div.freq_millihz, MILLI);
	req->best_parent_rate = req->rate * dsi_params->div.divider_value;
	dsi_info->req_pll_dsi_rate = req->best_parent_rate;

	return 0;
}

static int rzv2h_cpg_plldsi_div_set_rate(struct clk_hw *hw,
					 unsigned long rate,
					 unsigned long parent_rate)
{
	struct rzv2h_plldsi_div_clk *dsi_div = to_plldsi_div_clk(hw);
	struct pll_clk *pll_clk = to_pll(clk_hw_get_parent(hw));
	struct rzv2h_cpg_priv *priv = dsi_div->priv;
	struct rzv2h_pll_div_pars *dsi_params;
	struct rzv2h_pll_dsi_info *dsi_info;
	struct ddiv ddiv = dsi_div->ddiv;
	const struct clk_div_table *clkt;
	bool divider_found = false;
	u32 val, shift;

	dsi_info = &priv->pll_dsi_info[pll_clk->pll.instance];
	dsi_params = &dsi_info->pll_dsi_parameters;

	for (clkt = dsi_div->dtable; clkt->div; clkt++) {
		if (clkt->div == dsi_params->div.divider_value) {
			divider_found = true;
			break;
		}
	}

	if (!divider_found)
		return -EINVAL;

	shift = ddiv.shift;
	val = readl(priv->base + ddiv.offset) | DDIV_DIVCTL_WEN(shift);
	val &= ~(clk_div_mask(ddiv.width) << shift);
	val |= clkt->val << shift;
	writel(val, priv->base + ddiv.offset);

	return 0;
}

static const struct clk_ops rzv2h_cpg_plldsi_div_ops = {
	.recalc_rate = rzv2h_cpg_plldsi_div_recalc_rate,
	.determine_rate = rzv2h_cpg_plldsi_div_determine_rate,
	.set_rate = rzv2h_cpg_plldsi_div_set_rate,
};
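
/*
 * Note on the flow (derived from the ops above): .determine_rate runs the
 * parameter search and caches the chosen PLL + divider values in
 * priv->pll_dsi_info[], while .set_rate only looks the cached divider up
 * in the table and programs it; the heavy search happens once per rate
 * request.
 */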

static struct clk * __init
rzv2h_cpg_plldsi_div_clk_register(const struct cpg_core_clk *core,
				  struct rzv2h_cpg_priv *priv)
{
	struct rzv2h_plldsi_div_clk *clk_hw_data;
	struct clk **clks = priv->clks;
	struct clk_init_data init;
	const struct clk *parent;
	const char *parent_name;
	struct clk_hw *clk_hw;
	int ret;

	parent = clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;
	clk_hw_data->ddiv = core->cfg.ddiv;
	clk_hw_data->dtable = core->dtable;

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = &rzv2h_cpg_plldsi_div_ops;
	init.flags = core->flag;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

static int rzv2h_cpg_plldsi_determine_rate(struct clk_hw *hw,
					   struct clk_rate_request *req)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzv2h_cpg_priv *priv = pll_clk->priv;
	struct rzv2h_pll_dsi_info *dsi_info;
	u64 rate_millihz;

	dsi_info = &priv->pll_dsi_info[pll_clk->pll.instance];
	/* check if the divider has already invoked the algorithm */
	if (req->rate == dsi_info->req_pll_dsi_rate)
		return 0;

	/* If the req->rate doesn't match we do the calculation assuming there is no divider */
	rate_millihz = mul_u32_u32(req->rate, MILLI);
	if (!rzv2h_get_pll_pars(dsi_info->pll_dsi_limits,
				&dsi_info->pll_dsi_parameters.pll, rate_millihz)) {
		dev_err(priv->dev,
			"failed to determine rate for req->rate: %lu\n",
			req->rate);
		return -EINVAL;
	}

	req->rate = DIV_ROUND_CLOSEST_ULL(dsi_info->pll_dsi_parameters.pll.freq_millihz, MILLI);
	dsi_info->req_pll_dsi_rate = req->rate;

	return 0;
}

static int rzv2h_cpg_pll_set_rate(struct pll_clk *pll_clk,
				  struct rzv2h_pll_pars *params,
				  bool ssc_disable)
{
	struct rzv2h_cpg_priv *priv = pll_clk->priv;
	u16 offset = pll_clk->pll.offset;
	u32 val;
	int ret;

	/* Put PLL into standby mode */
	writel(CPG_PLL_STBY_RESETB_WEN, priv->base + CPG_PLL_STBY(offset));
	ret = readl_poll_timeout_atomic(priv->base + CPG_PLL_MON(offset),
					val, !(val & CPG_PLL_MON_LOCK),
					100, 2000);
	if (ret) {
		dev_err(priv->dev, "Failed to put PLLDSI into standby mode");
		return ret;
	}

	/* Output clock setting 1 */
	writel(FIELD_PREP(CPG_PLL_CLK1_KDIV, (u16)params->k) |
	       FIELD_PREP(CPG_PLL_CLK1_MDIV, params->m) |
	       FIELD_PREP(CPG_PLL_CLK1_PDIV, params->p),
	       priv->base + CPG_PLL_CLK1(offset));

	/* Output clock setting 2 */
	val = readl(priv->base + CPG_PLL_CLK2(offset));
	writel((val & ~CPG_PLL_CLK2_SDIV) | FIELD_PREP(CPG_PLL_CLK2_SDIV, params->s),
	       priv->base + CPG_PLL_CLK2(offset));

	/* Put PLL to normal mode */
	if (ssc_disable)
		val = CPG_PLL_STBY_SSC_EN_WEN;
	else
		val = CPG_PLL_STBY_SSC_EN_WEN | CPG_PLL_STBY_SSC_EN;
	writel(val | CPG_PLL_STBY_RESETB_WEN | CPG_PLL_STBY_RESETB,
	       priv->base + CPG_PLL_STBY(offset));

	/* PLL normal mode transition, output clock stability check */
	ret = readl_poll_timeout_atomic(priv->base + CPG_PLL_MON(offset),
					val, (val & CPG_PLL_MON_LOCK),
					100, 2000);
	if (ret) {
		dev_err(priv->dev, "Failed to put PLLDSI into normal mode");
		return ret;
	}

	return 0;
}

static int rzv2h_cpg_plldsi_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzv2h_pll_dsi_info *dsi_info;
	struct rzv2h_cpg_priv *priv = pll_clk->priv;

	dsi_info = &priv->pll_dsi_info[pll_clk->pll.instance];

	return rzv2h_cpg_pll_set_rate(pll_clk, &dsi_info->pll_dsi_parameters.pll, true);
}

static int rzv2h_cpg_pll_clk_is_enabled(struct clk_hw *hw)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzv2h_cpg_priv *priv = pll_clk->priv;
	u32 val = readl(priv->base + CPG_PLL_MON(pll_clk->pll.offset));

	/* Ensure both RESETB and LOCK bits are set */
	return (val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) ==
	       (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK);
}

static int rzv2h_cpg_pll_clk_enable(struct clk_hw *hw)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzv2h_cpg_priv *priv = pll_clk->priv;
	struct pll pll = pll_clk->pll;
	u32 stby_offset;
	u32 mon_offset;
	u32 val;
	int ret;

	if (rzv2h_cpg_pll_clk_is_enabled(hw))
		return 0;

	stby_offset = CPG_PLL_STBY(pll.offset);
	mon_offset = CPG_PLL_MON(pll.offset);

	writel(CPG_PLL_STBY_RESETB_WEN | CPG_PLL_STBY_RESETB,
	       priv->base + stby_offset);

	/*
	 * Ensure PLL enters into normal mode
	 *
	 * Note: There is no HW information about the worst case latency.
	 *
	 * Since this latency might depend on external crystal or PLL rate,
	 * use a "super" safe timeout value.
	 */
	ret = readl_poll_timeout_atomic(priv->base + mon_offset, val,
			(val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) ==
			(CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK), 200, 2000);
	if (ret)
		dev_err(priv->dev, "Failed to enable PLL 0x%x/%pC\n",
			stby_offset, hw->clk);

	return ret;
}

static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzv2h_cpg_priv *priv = pll_clk->priv;
	struct pll pll = pll_clk->pll;
	unsigned int clk1, clk2;
	u64 rate;

	if (!pll.has_clkn)
		return 0;

	clk1 = readl(priv->base + CPG_PLL_CLK1(pll.offset));
	clk2 = readl(priv->base + CPG_PLL_CLK2(pll.offset));

	rate = mul_u64_u32_shr(parent_rate, (FIELD_GET(CPG_PLL_CLK1_MDIV, clk1) << 16) +
			       (s16)FIELD_GET(CPG_PLL_CLK1_KDIV, clk1),
			       16 + FIELD_GET(CPG_PLL_CLK2_SDIV, clk2));

	return DIV_ROUND_CLOSEST_ULL(rate, FIELD_GET(CPG_PLL_CLK1_PDIV, clk1));
}

static const struct clk_ops rzv2h_cpg_plldsi_ops = {
	.recalc_rate = rzv2h_cpg_pll_clk_recalc_rate,
	.determine_rate = rzv2h_cpg_plldsi_determine_rate,
	.set_rate = rzv2h_cpg_plldsi_set_rate,
};

static const struct clk_ops rzv2h_cpg_pll_ops = {
	.is_enabled = rzv2h_cpg_pll_clk_is_enabled,
	.enable = rzv2h_cpg_pll_clk_enable,
	.recalc_rate = rzv2h_cpg_pll_clk_recalc_rate,
};

static struct clk * __init
rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct rzv2h_cpg_priv *priv,
			   const struct clk_ops *ops)
{
	struct device *dev = priv->dev;
	struct clk_init_data init;
	const struct clk *parent;
	const char *parent_name;
	struct pll_clk *pll_clk;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	if (core->type == CLK_TYPE_PLLDSI)
		priv->pll_dsi_info[core->cfg.pll.instance].pll_dsi_limits =
			core->cfg.pll.limits;

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->pll = core->cfg.pll;
	pll_clk->priv = priv;

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret)
		return ERR_PTR(ret);

	return pll_clk->hw.clk;
}

static unsigned long rzv2h_ddiv_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int val;

	val = readl(divider->reg) >> divider->shift;
	val &= clk_div_mask(divider->width);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

static int rzv2h_ddiv_determine_rate(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	struct clk_divider *divider = to_clk_divider(hw);

	return divider_determine_rate(hw, req, divider->table, divider->width,
				      divider->flags);
}

static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon)
{
	u32 bitmask = BIT(mon);
	u32 val;

	if (mon == CSDIV_NO_MON)
		return 0;

	return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val, !(val & bitmask), 10, 200);
}
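
/*
 * As the helper above shows, the CPG_CLKSTATUS0 bit "mon" reads non-zero
 * while a DDIV update is still propagating, so rzv2h_ddiv_set_rate()
 * below waits for it to clear both before and after writing the new
 * divider; CSDIV_NO_MON marks dividers without a monitor bit.
 */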

static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	struct ddiv_clk *ddiv = to_ddiv_clock(divider);
	struct rzv2h_cpg_priv *priv = ddiv->priv;
	unsigned long flags = 0;
	int value;
	u32 val;
	int ret;

	value = divider_get_val(rate, parent_rate, divider->table,
				divider->width, divider->flags);
	if (value < 0)
		return value;

	spin_lock_irqsave(divider->lock, flags);

	ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
	if (ret)
		goto ddiv_timeout;

	val = readl(divider->reg) | DDIV_DIVCTL_WEN(divider->shift);
	val &= ~(clk_div_mask(divider->width) << divider->shift);
	val |= (u32)value << divider->shift;
	writel(val, divider->reg);

	ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);

ddiv_timeout:
	spin_unlock_irqrestore(divider->lock, flags);
	return ret;
}

static const struct clk_ops rzv2h_ddiv_clk_divider_ops = {
	.recalc_rate = rzv2h_ddiv_recalc_rate,
	.determine_rate = rzv2h_ddiv_determine_rate,
	.set_rate = rzv2h_ddiv_set_rate,
};

static struct clk * __init
rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
			    struct rzv2h_cpg_priv *priv)
{
	struct ddiv cfg_ddiv = core->cfg.ddiv;
	struct clk_init_data init = {};
	struct device *dev = priv->dev;
	u8 shift = cfg_ddiv.shift;
	u8 width = cfg_ddiv.width;
	const struct clk *parent;
	const char *parent_name;
	struct clk_divider *div;
	struct ddiv_clk *ddiv;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	if ((shift + width) > 16)
		return ERR_PTR(-EINVAL);

	ddiv = devm_kzalloc(priv->dev, sizeof(*ddiv), GFP_KERNEL);
	if (!ddiv)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	if (cfg_ddiv.no_rmw)
		init.ops = &clk_divider_ops;
	else
		init.ops = &rzv2h_ddiv_clk_divider_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;
	init.flags = CLK_SET_RATE_PARENT;

	ddiv->priv = priv;
	ddiv->mon = cfg_ddiv.monbit;
	div = &ddiv->div;
	div->reg = priv->base + cfg_ddiv.offset;
	div->shift = shift;
	div->width = width;
	div->flags = core->flag;
	div->lock = &priv->rmw_lock;
	div->hw.init = &init;
	div->table = core->dtable;

	ret = devm_clk_hw_register(dev, &div->hw);
	if (ret)
		return ERR_PTR(ret);

	return div->hw.clk;
}

static struct clk * __init
rzv2h_cpg_mux_clk_register(const struct cpg_core_clk *core,
			   struct rzv2h_cpg_priv *priv)
{
	struct smuxed mux = core->cfg.smux;
	const struct clk_hw *clk_hw;

	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
					  core->parent_names, core->num_parents,
					  core->flag, priv->base + mux.offset,
					  mux.shift, mux.width,
					  core->mux_flags, &priv->rmw_lock);
	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

static int
rzv2h_clk_ff_mod_status_is_enabled(struct clk_hw *hw)
{
	struct rzv2h_ff_mod_status_clk *fix = to_rzv2h_ff_mod_status_clk(hw);
	struct rzv2h_cpg_priv *priv = fix->priv;
	u32 offset = GET_CLK_MON_OFFSET(fix->conf.mon_index);
	u32 bitmask = BIT(fix->conf.mon_bit);
	u32 val;

	val = readl(priv->base + offset);
	return !!(val & bitmask);
}

static struct clk * __init
rzv2h_cpg_fixed_mod_status_clk_register(const struct cpg_core_clk *core,
					struct rzv2h_cpg_priv *priv)
{
	struct rzv2h_ff_mod_status_clk *clk_hw_data;
	struct clk_init_data init = { };
	struct clk_fixed_factor *fix;
	const struct clk *parent;
	const char *parent_name;
	int ret;

	WARN_DEBUG(core->parent >= priv->num_core_clks);
	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;
	clk_hw_data->conf = core->cfg.fixed_mod;

	init.name = core->name;
	init.ops = priv->ff_mod_status_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	fix = &clk_hw_data->fix;
	fix->hw.init = &init;
	fix->mult = core->mult;
	fix->div = core->div;

	ret = devm_clk_hw_register(priv->dev, &clk_hw_data->fix.hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw_data->fix.hw.clk;
}

static struct clk
*rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
			       void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct rzv2h_cpg_priv *priv = data;
	struct device *dev = priv->dev;
	const char *type;
	struct clk *clk;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + clkidx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}
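
/*
 * Consumers reference these clocks with two cells; an illustrative device
 * tree snippet (index made up for the example):
 *
 *	clocks = <&cpg CPG_MOD 4>;
 *
 * The first cell selects the clock type (CPG_CORE or CPG_MOD), the second
 * the index validated above.
 */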

static void __init
rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
			    struct rzv2h_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	unsigned int id = core->id, div = core->div;
	struct device *dev = priv->dev;
	const char *parent_name;
	struct clk_hw *clk_hw;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name,
							   parent_name, CLK_SET_RATE_PARENT,
							   core->mult, div);
		if (IS_ERR(clk_hw))
			clk = ERR_CAST(clk_hw);
		else
			clk = clk_hw->clk;
		break;
	case CLK_TYPE_FF_MOD_STATUS:
		if (!priv->ff_mod_status_ops) {
			priv->ff_mod_status_ops =
				devm_kzalloc(dev, sizeof(*priv->ff_mod_status_ops), GFP_KERNEL);
			if (!priv->ff_mod_status_ops) {
				clk = ERR_PTR(-ENOMEM);
				goto fail;
			}
			memcpy(priv->ff_mod_status_ops, &clk_fixed_factor_ops,
			       sizeof(const struct clk_ops));
			priv->ff_mod_status_ops->is_enabled = rzv2h_clk_ff_mod_status_is_enabled;
		}
		clk = rzv2h_cpg_fixed_mod_status_clk_register(core, priv);
		break;
	case CLK_TYPE_PLL:
		clk = rzv2h_cpg_pll_clk_register(core, priv, &rzv2h_cpg_pll_ops);
		break;
	case CLK_TYPE_DDIV:
		clk = rzv2h_cpg_ddiv_clk_register(core, priv);
		break;
	case CLK_TYPE_SMUX:
		clk = rzv2h_cpg_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_PLLDSI:
		clk = rzv2h_cpg_pll_clk_register(core, priv, &rzv2h_cpg_plldsi_ops);
		break;
	case CLK_TYPE_PLLDSI_DIV:
		clk = rzv2h_cpg_plldsi_div_clk_register(core, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register core clock %s: %ld\n",
		core->name, PTR_ERR(clk));
}

static void rzv2h_mod_clock_mstop_enable(struct rzv2h_cpg_priv *priv,
					 u32 mstop_data)
{
	unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
	u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
	atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
	unsigned long flags;
	unsigned int i;
	u32 val = 0;

	spin_lock_irqsave(&priv->rmw_lock, flags);
	for_each_set_bit(i, &mstop_mask, 16) {
		if (!atomic_read(&mstop[i]))
			val |= BIT(i) << 16;
		atomic_inc(&mstop[i]);
	}
	if (val)
		writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
	spin_unlock_irqrestore(&priv->rmw_lock, flags);
}

static void rzv2h_mod_clock_mstop_disable(struct rzv2h_cpg_priv *priv,
					  u32 mstop_data)
{
	unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
	u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
	atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
	unsigned long flags;
	unsigned int i;
	u32 val = 0;

	spin_lock_irqsave(&priv->rmw_lock, flags);
	for_each_set_bit(i, &mstop_mask, 16) {
		if (!atomic_read(&mstop[i]) ||
		    atomic_dec_and_test(&mstop[i]))
			val |= BIT(i) << 16 | BIT(i);
	}
	if (val)
		writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
	spin_unlock_irqrestore(&priv->rmw_lock, flags);
}
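
/*
 * MSTOP refcounting sketch (illustrative walkthrough of the two helpers
 * above): if two module clocks share MSTOP bit 3 of bus 2, the first
 * enable writes BIT(3) << 16 (write-enable half only, clearing the stop
 * bit) to CPG_BUS_MSTOP(2) and bumps the counter to 1; the second enable
 * only bumps it to 2. Disables mirror this, and the stop bit
 * (BIT(3) << 16 | BIT(3)) is written back only once the counter drops to
 * zero.
 */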

static int rzv2h_parent_clk_mux_to_index(struct clk_hw *hw)
{
	struct clk_hw *parent_hw;
	struct clk *parent_clk;
	struct clk_mux *mux;
	u32 val;

	/* This will always succeed, so no need to check for IS_ERR() */
	parent_clk = clk_get_parent(hw->clk);

	parent_hw = __clk_get_hw(parent_clk);
	mux = to_clk_mux(parent_hw);

	val = readl(mux->reg) >> mux->shift;
	val &= mux->mask;
	return clk_mux_val_to_index(parent_hw, mux->table, 0, val);
}

static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
{
	struct mod_clock *clock = to_mod_clock(hw);
	struct rzv2h_cpg_priv *priv = clock->priv;
	int mon_index = clock->mon_index;
	u32 bitmask;
	u32 offset;

	if (clock->ext_clk_mux_index >= 0 &&
	    rzv2h_parent_clk_mux_to_index(hw) == clock->ext_clk_mux_index)
		mon_index = -1;

	if (mon_index >= 0) {
		offset = GET_CLK_MON_OFFSET(mon_index);
		bitmask = BIT(clock->mon_bit);

		if (!(readl(priv->base + offset) & bitmask))
			return 0;
	}

	offset = GET_CLK_ON_OFFSET(clock->on_index);
	bitmask = BIT(clock->on_bit);

	return readl(priv->base + offset) & bitmask;
}

static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	bool enabled = rzv2h_mod_clock_is_enabled(hw);
	struct mod_clock *clock = to_mod_clock(hw);
	unsigned int reg = GET_CLK_ON_OFFSET(clock->on_index);
	struct rzv2h_cpg_priv *priv = clock->priv;
	u32 bitmask = BIT(clock->on_bit);
	struct device *dev = priv->dev;
	u32 value;
	int error;

	dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk,
		str_on_off(enable));

	if (enabled == enable)
		return 0;

	value = bitmask << 16;
	if (enable) {
		value |= bitmask;
		writel(value, priv->base + reg);
		if (clock->mstop_data != BUS_MSTOP_NONE)
			rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data);
	} else {
		if (clock->mstop_data != BUS_MSTOP_NONE)
			rzv2h_mod_clock_mstop_disable(priv, clock->mstop_data);
		writel(value, priv->base + reg);
	}

	if (!enable || clock->mon_index < 0)
		return 0;

	reg = GET_CLK_MON_OFFSET(clock->mon_index);
	bitmask = BIT(clock->mon_bit);
	error = readl_poll_timeout_atomic(priv->base + reg, value,
					  value & bitmask, 0, 10);
	if (error)
		dev_err(dev, "Failed to enable CLK_ON 0x%x/%pC\n",
			GET_CLK_ON_OFFSET(clock->on_index), hw->clk);

	return error;
}

static int rzv2h_mod_clock_enable(struct clk_hw *hw)
{
	return rzv2h_mod_clock_endisable(hw, true);
}

static void rzv2h_mod_clock_disable(struct clk_hw *hw)
{
	rzv2h_mod_clock_endisable(hw, false);
}

static const struct clk_ops rzv2h_mod_clock_ops = {
	.enable = rzv2h_mod_clock_enable,
	.disable = rzv2h_mod_clock_disable,
	.is_enabled = rzv2h_mod_clock_is_enabled,
};

static void __init
rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
			   struct rzv2h_cpg_priv *priv)
{
	struct mod_clock *clock = NULL;
	struct device *dev = priv->dev;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int id;
	int ret;

	id = GET_MOD_CLK_ID(priv->num_core_clks, mod->on_index, mod->on_bit);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzv2h_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	if (mod->critical)
		init.flags |= CLK_IS_CRITICAL;

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->on_index = mod->on_index;
	clock->on_bit = mod->on_bit;
	clock->mon_index = mod->mon_index;
	clock->mon_bit = mod->mon_bit;
	clock->no_pm = mod->no_pm;
	clock->ext_clk_mux_index = mod->ext_clk_mux_index;
	clock->priv = priv;
	clock->hw.init = &init;
	clock->mstop_data = mod->mstop_data;

	ret = devm_clk_hw_register(dev, &clock->hw);
	if (ret) {
		clk = ERR_PTR(ret);
		goto fail;
	}

	priv->clks[id] = clock->hw.clk;

	/*
	 * Ensure the module clocks and MSTOP bits are synchronized when they are
	 * turned ON by the bootloader. Enable MSTOP bits for module clocks that were
	 * turned ON in an earlier boot stage.
	 */
	if (clock->mstop_data != BUS_MSTOP_NONE &&
	    !mod->critical && rzv2h_mod_clock_is_enabled(&clock->hw)) {
		rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data);
	} else if (clock->mstop_data != BUS_MSTOP_NONE && mod->critical) {
		unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, clock->mstop_data);
		u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, clock->mstop_data);
		atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
		unsigned long flags;
		unsigned int i;
		u32 val = 0;

		/*
		 * Critical clocks are turned ON immediately upon registration, and the
		 * MSTOP counter is updated through the rzv2h_mod_clock_enable() path.
		 * However, if the critical clocks were already turned ON by the initial
		 * bootloader, synchronize the atomic counter here and clear the MSTOP bit.
		 */
		spin_lock_irqsave(&priv->rmw_lock, flags);
		for_each_set_bit(i, &mstop_mask, 16) {
			if (atomic_read(&mstop[i]))
				continue;
			val |= BIT(i) << 16;
			atomic_inc(&mstop[i]);
		}
		if (val)
			writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
	}

	return;

fail:
	dev_err(dev, "Failed to register module clock %s: %ld\n",
		mod->name, PTR_ERR(clk));
}

static int __rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
			      unsigned long id, bool assert)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
	u32 mask = BIT(priv->resets[id].reset_bit);
	u8 monbit = priv->resets[id].mon_bit;
	u32 value = mask << 16;
	int ret;

	dev_dbg(rcdev->dev, "%s id:%ld offset:0x%x\n",
		assert ? "assert" : "deassert", id, reg);

	if (!assert)
		value |= mask;
	writel(value, priv->base + reg);

	reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	mask = BIT(monbit);

	ret = readl_poll_timeout_atomic(priv->base + reg, value,
					assert == !!(value & mask), 10, 200);
	if (ret && !assert) {
		value = mask << 16;
		writel(value, priv->base + GET_RST_OFFSET(priv->resets[id].reset_index));
	}

	return ret;
}

static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	return __rzv2h_cpg_assert(rcdev, id, true);
}

static int rzv2h_cpg_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	return __rzv2h_cpg_assert(rcdev, id, false);
}

static int rzv2h_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	int ret;

	ret = rzv2h_cpg_assert(rcdev, id);
	if (ret)
		return ret;

	return rzv2h_cpg_deassert(rcdev, id);
}

static int rzv2h_cpg_status(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	u8 monbit = priv->resets[id].mon_bit;

	return !!(readl(priv->base + reg) & BIT(monbit));
}

static const struct reset_control_ops rzv2h_cpg_reset_ops = {
	.reset = rzv2h_cpg_reset,
	.assert = rzv2h_cpg_assert,
	.deassert = rzv2h_cpg_deassert,
	.status = rzv2h_cpg_status,
};

static int rzv2h_cpg_reset_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int id = reset_spec->args[0];
	u8 rst_index = id / 16;
	u8 rst_bit = id % 16;
	unsigned int i;

	for (i = 0; i < rcdev->nr_resets; i++) {
		if (rst_index == priv->resets[i].reset_index &&
		    rst_bit == priv->resets[i].reset_bit)
			return i;
	}

	return -EINVAL;
}
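
/*
 * Reset specifiers encode register index and bit as (index * 16 + bit).
 * An illustrative consumer (number made up for the example):
 *
 *	resets = <&cpg 0x23>;
 *
 * maps to rst_index 2, rst_bit 3, which the loop above translates to the
 * matching position in priv->resets[].
 */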

static int rzv2h_cpg_reset_controller_register(struct rzv2h_cpg_priv *priv)
{
	priv->rcdev.ops = &rzv2h_cpg_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.dev = priv->dev;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = rzv2h_cpg_reset_xlate;
	priv->rcdev.nr_resets = priv->num_resets;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

/**
 * struct rzv2h_cpg_pd - RZ/V2H power domain data structure
 * @priv: pointer to CPG private data structure
 * @genpd: generic PM domain
 */
struct rzv2h_cpg_pd {
	struct rzv2h_cpg_priv *priv;
	struct generic_pm_domain genpd;
};

static bool rzv2h_cpg_is_pm_clk(struct rzv2h_cpg_pd *pd,
				const struct of_phandle_args *clkspec)
{
	if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
		return false;

	switch (clkspec->args[0]) {
	case CPG_MOD: {
		struct rzv2h_cpg_priv *priv = pd->priv;
		unsigned int id = clkspec->args[1];
		struct mod_clock *clock;

		if (id >= priv->num_mod_clks)
			return false;

		if (priv->clks[priv->num_core_clks + id] == ERR_PTR(-ENOENT))
			return false;

		clock = to_mod_clock(__clk_get_hw(priv->clks[priv->num_core_clks + id]));

		return !clock->no_pm;
	}

	case CPG_CORE:
	default:
		return false;
	}
}

static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	struct rzv2h_cpg_pd *pd = container_of(domain, struct rzv2h_cpg_pd, genpd);
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	unsigned int i;
	int error;

	for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
		if (!rzv2h_cpg_is_pm_clk(pd, &clkspec)) {
			of_node_put(clkspec.np);
			continue;
		}

		if (once) {
			once = false;
			error = pm_clk_create(dev);
			if (error) {
				of_node_put(clkspec.np);
				goto err;
			}
		}
		clk = of_clk_get_from_provider(&clkspec);
		of_node_put(clkspec.np);
		if (IS_ERR(clk)) {
			error = PTR_ERR(clk);
			goto fail_destroy;
		}

		error = pm_clk_add_clk(dev, clk);
		if (error) {
			dev_err(dev, "pm_clk_add_clk failed %d\n",
				error);
			goto fail_put;
		}
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}

static void rzv2h_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static void rzv2h_cpg_genpd_remove_simple(void *data)
{
	pm_genpd_remove(data);
}

static int __init rzv2h_cpg_add_pm_domains(struct rzv2h_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzv2h_cpg_pd *pd;
	int ret;

	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->genpd.name = np->name;
	pd->priv = priv;
	pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
	pd->genpd.attach_dev = rzv2h_cpg_attach_dev;
	pd->genpd.detach_dev = rzv2h_cpg_detach_dev;
	ret = pm_genpd_init(&pd->genpd, &pm_domain_always_on_gov, false);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, rzv2h_cpg_genpd_remove_simple, &pd->genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, &pd->genpd);
}

static void rzv2h_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

static int __init rzv2h_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzv2h_cpg_info *info;
	struct rzv2h_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->rmw_lock);

	priv->dev = dev;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	priv->mstop_count = devm_kcalloc(dev, info->num_mstop_bits,
					 sizeof(*priv->mstop_count), GFP_KERNEL);
	if (!priv->mstop_count)
		return -ENOMEM;

	/* Adjust for CPG_BUS_m_MSTOP starting from m = 1 */
	priv->mstop_count -= 16;

	priv->resets = devm_kmemdup_array(dev, info->resets, info->num_resets,
					  sizeof(*info->resets), GFP_KERNEL);
	if (!priv->resets)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->last_dt_core_clk = info->last_dt_core_clk;
	priv->num_resets = info->num_resets;

	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzv2h_cpg_register_core_clk(&info->core_clks[i], priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzv2h_cpg_register_mod_clk(&info->mod_clks[i], priv);

	error = of_clk_add_provider(np, rzv2h_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzv2h_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzv2h_cpg_add_pm_domains(priv);
	if (error)
		return error;

	error = rzv2h_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}

static const struct of_device_id rzv2h_cpg_match[] = {
#ifdef CONFIG_CLK_R9A09G047
	{
		.compatible = "renesas,r9a09g047-cpg",
		.data = &r9a09g047_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G056
	{
		.compatible = "renesas,r9a09g056-cpg",
		.data = &r9a09g056_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G057
	{
		.compatible = "renesas,r9a09g057-cpg",
		.data = &r9a09g057_cpg_info,
	},
#endif
	{ /* sentinel */ }
};

static struct platform_driver rzv2h_cpg_driver = {
	.driver		= {
		.name	= "rzv2h-cpg",
		.of_match_table = rzv2h_cpg_match,
	},
};

static int __init rzv2h_cpg_init(void)
{
	return platform_driver_probe(&rzv2h_cpg_driver, rzv2h_cpg_probe);
}

subsys_initcall(rzv2h_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/V2H CPG Driver");