xref: /linux/drivers/clk/renesas/rzg2l-cpg.c (revision 522ba450b56fff29f868b1552bdc2965f55de7ed)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * RZ/G2L Clock Pulse Generator
4  *
5  * Copyright (C) 2021 Renesas Electronics Corp.
6  *
7  * Based on renesas-cpg-mssr.c
8  *
9  * Copyright (C) 2015 Glider bvba
10  * Copyright (C) 2013 Ideas On Board SPRL
11  * Copyright (C) 2015 Renesas Electronics Corp.
12  */
13 
14 #include <linux/atomic.h>
15 #include <linux/bitfield.h>
16 #include <linux/cleanup.h>
17 #include <linux/clk.h>
18 #include <linux/clk-provider.h>
19 #include <linux/clk/renesas.h>
20 #include <linux/debugfs.h>
21 #include <linux/delay.h>
22 #include <linux/device.h>
23 #include <linux/init.h>
24 #include <linux/iopoll.h>
25 #include <linux/mod_devicetable.h>
26 #include <linux/module.h>
27 #include <linux/of.h>
28 #include <linux/platform_device.h>
29 #include <linux/pm_clock.h>
30 #include <linux/pm_domain.h>
31 #include <linux/reset-controller.h>
32 #include <linux/slab.h>
33 #include <linux/string_choices.h>
34 #include <linux/units.h>
35 
36 #include <dt-bindings/clock/renesas-cpg-mssr.h>
37 
38 #include "rzg2l-cpg.h"
39 
40 #ifdef DEBUG
41 #define WARN_DEBUG(x)	WARN_ON(x)
42 #else
43 #define WARN_DEBUG(x)	do { } while (0)
44 #endif
45 
46 #define GET_SHIFT(val)		((val >> 12) & 0xff)
47 #define GET_WIDTH(val)		((val >> 8) & 0xf)
48 
49 #define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), val))
50 #define MDIV(val)		FIELD_GET(GENMASK(15, 6), val)
51 #define PDIV(val)		FIELD_GET(GENMASK(5, 0), val)
52 #define SDIV(val)		FIELD_GET(GENMASK(2, 0), val)
53 
54 #define RZG3S_DIV_P		GENMASK(28, 26)
55 #define RZG3S_DIV_M		GENMASK(25, 22)
56 #define RZG3S_DIV_NI		GENMASK(21, 13)
57 #define RZG3S_DIV_NF		GENMASK(12, 1)
58 #define RZG3S_SEL_PLL		BIT(0)
59 
60 #define CLK_ON_R(reg)		(reg)
61 #define CLK_MON_R(reg)		(0x180 + (reg))
62 #define CLK_RST_R(reg)		(reg)
63 #define CLK_MRST_R(reg)		(0x180 + (reg))
64 
65 #define GET_REG_OFFSET(val)		((val >> 20) & 0xfff)
66 #define GET_REG_SAMPLL_CLK1(val)	((val >> 22) & 0xfff)
67 #define GET_REG_SAMPLL_CLK2(val)	((val >> 12) & 0xfff)
68 #define GET_REG_SAMPLL_SETTING(val)	((val) & 0xfff)
69 
70 #define CPG_WEN_BIT		BIT(16)
71 
72 #define MAX_VCLK_FREQ		(148500000)
73 
74 #define MSTOP_OFF(conf)		FIELD_GET(GENMASK(31, 16), (conf))
75 #define MSTOP_MASK(conf)	FIELD_GET(GENMASK(15, 0), (conf))
76 
77 /**
78  * struct clk_hw_data - clock hardware data
79  * @hw: clock hw
80  * @conf: clock configuration (register offset, shift, width)
81  * @sconf: clock status configuration (register offset, shift, width)
82  * @priv: CPG private data structure
83  */
84 struct clk_hw_data {
85 	struct clk_hw hw;
86 	u32 conf;
87 	u32 sconf;
88 	struct rzg2l_cpg_priv *priv;
89 };
90 
91 #define to_clk_hw_data(_hw)	container_of(_hw, struct clk_hw_data, hw)
92 
93 /**
94  * struct sd_mux_hw_data - SD MUX clock hardware data
95  * @hw_data: clock hw data
96  * @mtable: clock mux table
97  */
98 struct sd_mux_hw_data {
99 	struct clk_hw_data hw_data;
100 	const u32 *mtable;
101 };
102 
103 #define to_sd_mux_hw_data(_hw)	container_of(_hw, struct sd_mux_hw_data, hw_data)
104 
105 /**
106  * struct div_hw_data - divider clock hardware data
107  * @hw_data: clock hw data
108  * @dtable: pointer to divider table
109  * @invalid_rate: invalid rate for divider
110  * @max_rate: maximum rate for divider
111  * @width: divider width
112  */
113 struct div_hw_data {
114 	struct clk_hw_data hw_data;
115 	const struct clk_div_table *dtable;
116 	unsigned long invalid_rate;
117 	unsigned long max_rate;
118 	u32 width;
119 };
120 
121 #define to_div_hw_data(_hw)	container_of(_hw, struct div_hw_data, hw_data)
122 
123 struct rzg2l_pll5_param {
124 	u32 pl5_fracin;
125 	u8 pl5_refdiv;
126 	u8 pl5_intin;
127 	u8 pl5_postdiv1;
128 	u8 pl5_postdiv2;
129 	u8 pl5_spread;
130 };
131 
132 struct rzg2l_pll5_mux_dsi_div_param {
133 	u8 clksrc;
134 	u8 dsi_div_a;
135 	u8 dsi_div_b;
136 };
137 
138 /**
139  * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
140  *
141  * @rcdev: Reset controller entity
142  * @dev: CPG device
143  * @base: CPG register block base address
144  * @rmw_lock: protects register accesses
145  * @clks: Array containing all Core and Module Clocks
146  * @num_core_clks: Number of Core Clocks in clks[]
147  * @num_mod_clks: Number of Module Clocks in clks[]
148  * @num_resets: Number of Module Resets in info->resets[]
149  * @last_dt_core_clk: ID of the last Core Clock exported to DT
150  * @info: Pointer to platform data
151  * @genpd: PM domain
152  * @mux_dsi_div_params: pll5 mux and dsi div parameters
153  */
154 struct rzg2l_cpg_priv {
155 	struct reset_controller_dev rcdev;
156 	struct device *dev;
157 	void __iomem *base;
158 	spinlock_t rmw_lock;
159 
160 	struct clk **clks;
161 	unsigned int num_core_clks;
162 	unsigned int num_mod_clks;
163 	unsigned int num_resets;
164 	unsigned int last_dt_core_clk;
165 
166 	const struct rzg2l_cpg_info *info;
167 
168 	struct generic_pm_domain genpd;
169 
170 	struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
171 };
172 
rzg2l_cpg_del_clk_provider(void * data)173 static void rzg2l_cpg_del_clk_provider(void *data)
174 {
175 	of_clk_del_provider(data);
176 }
177 
178 /* Must be called in atomic context. */
rzg2l_cpg_wait_clk_update_done(void __iomem * base,u32 conf)179 static int rzg2l_cpg_wait_clk_update_done(void __iomem *base, u32 conf)
180 {
181 	u32 bitmask = GENMASK(GET_WIDTH(conf) - 1, 0) << GET_SHIFT(conf);
182 	u32 off = GET_REG_OFFSET(conf);
183 	u32 val;
184 
185 	return readl_poll_timeout_atomic(base + off, val, !(val & bitmask), 10, 200);
186 }
187 
rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block * nb,unsigned long event,void * data)188 int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event,
189 				  void *data)
190 {
191 	struct clk_notifier_data *cnd = data;
192 	struct clk_hw *hw = __clk_get_hw(cnd->clk);
193 	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
194 	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
195 	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
196 	u32 shift = GET_SHIFT(clk_hw_data->conf);
197 	const u32 clk_src_266 = 3;
198 	unsigned long flags;
199 	int ret;
200 
201 	if (event != PRE_RATE_CHANGE || (cnd->new_rate / MEGA == 266))
202 		return NOTIFY_DONE;
203 
204 	spin_lock_irqsave(&priv->rmw_lock, flags);
205 
206 	/*
207 	 * As per the HW manual, we should not directly switch from 533 MHz to
208 	 * 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz)
209 	 * to 2’b10 (400 MHz) or vice versa, Switch to 2’b11 (266 MHz) first,
210 	 * and then switch to the target setting (2’b01 (533 MHz) or 2’b10
211 	 * (400 MHz)).
212 	 * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
213 	 * switching register is prohibited.
214 	 * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and
215 	 * the index to value mapping is done by adding 1 to the index.
216 	 */
217 
218 	writel((CPG_WEN_BIT | clk_src_266) << shift, priv->base + off);
219 
220 	/* Wait for the update done. */
221 	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
222 
223 	spin_unlock_irqrestore(&priv->rmw_lock, flags);
224 
225 	if (ret)
226 		dev_err(priv->dev, "failed to switch to safe clk source\n");
227 
228 	return notifier_from_errno(ret);
229 }
230 
rzg3s_cpg_div_clk_notifier(struct notifier_block * nb,unsigned long event,void * data)231 int rzg3s_cpg_div_clk_notifier(struct notifier_block *nb, unsigned long event,
232 			       void *data)
233 {
234 	struct clk_notifier_data *cnd = data;
235 	struct clk_hw *hw = __clk_get_hw(cnd->clk);
236 	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
237 	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
238 	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
239 	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
240 	u32 shift = GET_SHIFT(clk_hw_data->conf);
241 	unsigned long flags;
242 	int ret = 0;
243 	u32 val;
244 
245 	if (event != PRE_RATE_CHANGE || !div_hw_data->invalid_rate ||
246 	    div_hw_data->invalid_rate % cnd->new_rate)
247 		return NOTIFY_DONE;
248 
249 	spin_lock_irqsave(&priv->rmw_lock, flags);
250 
251 	val = readl(priv->base + off);
252 	val >>= shift;
253 	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);
254 
255 	/*
256 	 * There are different constraints for the user of this notifiers as follows:
257 	 * 1/ SD div cannot be 1 (val == 0) if parent rate is 800MHz
258 	 * 2/ OCTA / SPI div cannot be 1 (val == 0) if parent rate is 400MHz
259 	 * As SD can have only one parent having 800MHz and OCTA div can have
260 	 * only one parent having 400MHz we took into account the parent rate
261 	 * at the beginning of function (by checking invalid_rate % new_rate).
262 	 * Now it is time to check the hardware divider and update it accordingly.
263 	 */
264 	if (!val) {
265 		writel((CPG_WEN_BIT | 1) << shift, priv->base + off);
266 		/* Wait for the update done. */
267 		ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
268 	}
269 
270 	spin_unlock_irqrestore(&priv->rmw_lock, flags);
271 
272 	if (ret)
273 		dev_err(priv->dev, "Failed to downgrade the div\n");
274 
275 	return notifier_from_errno(ret);
276 }
277 
rzg2l_register_notifier(struct clk_hw * hw,const struct cpg_core_clk * core,struct rzg2l_cpg_priv * priv)278 static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk *core,
279 				   struct rzg2l_cpg_priv *priv)
280 {
281 	struct notifier_block *nb;
282 
283 	if (!core->notifier)
284 		return 0;
285 
286 	nb = devm_kzalloc(priv->dev, sizeof(*nb), GFP_KERNEL);
287 	if (!nb)
288 		return -ENOMEM;
289 
290 	nb->notifier_call = core->notifier;
291 
292 	return clk_notifier_register(hw->clk, nb);
293 }
294 
rzg3s_div_clk_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)295 static unsigned long rzg3s_div_clk_recalc_rate(struct clk_hw *hw,
296 					       unsigned long parent_rate)
297 {
298 	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
299 	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
300 	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
301 	u32 val;
302 
303 	val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
304 	val >>= GET_SHIFT(clk_hw_data->conf);
305 	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);
306 
307 	return divider_recalc_rate(hw, parent_rate, val, div_hw_data->dtable,
308 				   CLK_DIVIDER_ROUND_CLOSEST, div_hw_data->width);
309 }
310 
rzg3s_div_clk_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)311 static int rzg3s_div_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
312 {
313 	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
314 	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
315 
316 	if (div_hw_data->max_rate && req->rate > div_hw_data->max_rate)
317 		req->rate = div_hw_data->max_rate;
318 
319 	return divider_determine_rate(hw, req, div_hw_data->dtable, div_hw_data->width,
320 				      CLK_DIVIDER_ROUND_CLOSEST);
321 }
322 
rzg3s_div_clk_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)323 static int rzg3s_div_clk_set_rate(struct clk_hw *hw, unsigned long rate,
324 				  unsigned long parent_rate)
325 {
326 	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
327 	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
328 	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
329 	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
330 	u32 shift = GET_SHIFT(clk_hw_data->conf);
331 	unsigned long flags;
332 	u32 val;
333 	int ret;
334 
335 	val = divider_get_val(rate, parent_rate, div_hw_data->dtable, div_hw_data->width,
336 			      CLK_DIVIDER_ROUND_CLOSEST);
337 
338 	spin_lock_irqsave(&priv->rmw_lock, flags);
339 	writel((CPG_WEN_BIT | val) << shift, priv->base + off);
340 	/* Wait for the update done. */
341 	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
342 	spin_unlock_irqrestore(&priv->rmw_lock, flags);
343 
344 	return ret;
345 }
346 
347 static const struct clk_ops rzg3s_div_clk_ops = {
348 	.recalc_rate = rzg3s_div_clk_recalc_rate,
349 	.determine_rate = rzg3s_div_clk_determine_rate,
350 	.set_rate = rzg3s_div_clk_set_rate,
351 };
352 
353 static struct clk * __init
rzg3s_cpg_div_clk_register(const struct cpg_core_clk * core,struct rzg2l_cpg_priv * priv)354 rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct rzg2l_cpg_priv *priv)
355 {
356 	struct div_hw_data *div_hw_data;
357 	struct clk_init_data init = {};
358 	const struct clk_div_table *clkt;
359 	struct clk_hw *clk_hw;
360 	const struct clk *parent;
361 	const char *parent_name;
362 	u32 max = 0;
363 	int ret;
364 
365 	parent = priv->clks[core->parent];
366 	if (IS_ERR(parent))
367 		return ERR_CAST(parent);
368 
369 	parent_name = __clk_get_name(parent);
370 
371 	div_hw_data = devm_kzalloc(priv->dev, sizeof(*div_hw_data), GFP_KERNEL);
372 	if (!div_hw_data)
373 		return ERR_PTR(-ENOMEM);
374 
375 	init.name = core->name;
376 	init.flags = core->flag;
377 	init.ops = &rzg3s_div_clk_ops;
378 	init.parent_names = &parent_name;
379 	init.num_parents = 1;
380 
381 	/* Get the maximum divider to retrieve div width. */
382 	for (clkt = core->dtable; clkt->div; clkt++) {
383 		if (max < clkt->div)
384 			max = clkt->div;
385 	}
386 
387 	div_hw_data->hw_data.priv = priv;
388 	div_hw_data->hw_data.conf = core->conf;
389 	div_hw_data->hw_data.sconf = core->sconf;
390 	div_hw_data->dtable = core->dtable;
391 	div_hw_data->invalid_rate = core->invalid_rate;
392 	div_hw_data->max_rate = core->max_rate;
393 	div_hw_data->width = fls(max) - 1;
394 
395 	clk_hw = &div_hw_data->hw_data.hw;
396 	clk_hw->init = &init;
397 
398 	ret = devm_clk_hw_register(priv->dev, clk_hw);
399 	if (ret)
400 		return ERR_PTR(ret);
401 
402 	ret = rzg2l_register_notifier(clk_hw, core, priv);
403 	if (ret) {
404 		dev_err(priv->dev, "Failed to register notifier for %s\n",
405 			core->name);
406 		return ERR_PTR(ret);
407 	}
408 
409 	return clk_hw->clk;
410 }
411 
412 static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk * core,struct rzg2l_cpg_priv * priv)413 rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
414 			   struct rzg2l_cpg_priv *priv)
415 {
416 	void __iomem *base = priv->base;
417 	struct device *dev = priv->dev;
418 	const struct clk *parent;
419 	const char *parent_name;
420 	struct clk_hw *clk_hw;
421 
422 	parent = priv->clks[core->parent];
423 	if (IS_ERR(parent))
424 		return ERR_CAST(parent);
425 
426 	parent_name = __clk_get_name(parent);
427 
428 	if (core->dtable)
429 		clk_hw = clk_hw_register_divider_table(dev, core->name,
430 						       parent_name, 0,
431 						       base + GET_REG_OFFSET(core->conf),
432 						       GET_SHIFT(core->conf),
433 						       GET_WIDTH(core->conf),
434 						       core->flag,
435 						       core->dtable,
436 						       &priv->rmw_lock);
437 	else
438 		clk_hw = clk_hw_register_divider(dev, core->name,
439 						 parent_name, 0,
440 						 base + GET_REG_OFFSET(core->conf),
441 						 GET_SHIFT(core->conf),
442 						 GET_WIDTH(core->conf),
443 						 core->flag, &priv->rmw_lock);
444 
445 	if (IS_ERR(clk_hw))
446 		return ERR_CAST(clk_hw);
447 
448 	return clk_hw->clk;
449 }
450 
451 static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk * core,struct rzg2l_cpg_priv * priv)452 rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
453 			   struct rzg2l_cpg_priv *priv)
454 {
455 	const struct clk_hw *clk_hw;
456 
457 	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
458 					  core->parent_names, core->num_parents,
459 					  core->flag,
460 					  priv->base + GET_REG_OFFSET(core->conf),
461 					  GET_SHIFT(core->conf),
462 					  GET_WIDTH(core->conf),
463 					  core->mux_flags, &priv->rmw_lock);
464 	if (IS_ERR(clk_hw))
465 		return ERR_CAST(clk_hw);
466 
467 	return clk_hw->clk;
468 }
469 
rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw * hw,u8 index)470 static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
471 {
472 	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
473 	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
474 	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
475 	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
476 	u32 shift = GET_SHIFT(clk_hw_data->conf);
477 	unsigned long flags;
478 	u32 val;
479 	int ret;
480 
481 	val = clk_mux_index_to_val(sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, index);
482 
483 	spin_lock_irqsave(&priv->rmw_lock, flags);
484 
485 	writel((CPG_WEN_BIT | val) << shift, priv->base + off);
486 
487 	/* Wait for the update done. */
488 	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
489 
490 	spin_unlock_irqrestore(&priv->rmw_lock, flags);
491 
492 	if (ret)
493 		dev_err(priv->dev, "Failed to switch parent\n");
494 
495 	return ret;
496 }
497 
rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw * hw)498 static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
499 {
500 	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
501 	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
502 	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
503 	u32 val;
504 
505 	val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
506 	val >>= GET_SHIFT(clk_hw_data->conf);
507 	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);
508 
509 	return clk_mux_val_to_index(hw, sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, val);
510 }
511 
512 static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
513 	.determine_rate = __clk_mux_determine_rate_closest,
514 	.set_parent	= rzg2l_cpg_sd_clk_mux_set_parent,
515 	.get_parent	= rzg2l_cpg_sd_clk_mux_get_parent,
516 };
517 
518 static struct clk * __init
rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk * core,struct rzg2l_cpg_priv * priv)519 rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
520 			      struct rzg2l_cpg_priv *priv)
521 {
522 	struct sd_mux_hw_data *sd_mux_hw_data;
523 	struct clk_init_data init;
524 	struct clk_hw *clk_hw;
525 	int ret;
526 
527 	sd_mux_hw_data = devm_kzalloc(priv->dev, sizeof(*sd_mux_hw_data), GFP_KERNEL);
528 	if (!sd_mux_hw_data)
529 		return ERR_PTR(-ENOMEM);
530 
531 	sd_mux_hw_data->hw_data.priv = priv;
532 	sd_mux_hw_data->hw_data.conf = core->conf;
533 	sd_mux_hw_data->hw_data.sconf = core->sconf;
534 	sd_mux_hw_data->mtable = core->mtable;
535 
536 	init.name = core->name;
537 	init.ops = &rzg2l_cpg_sd_clk_mux_ops;
538 	init.flags = core->flag;
539 	init.num_parents = core->num_parents;
540 	init.parent_names = core->parent_names;
541 
542 	clk_hw = &sd_mux_hw_data->hw_data.hw;
543 	clk_hw->init = &init;
544 
545 	ret = devm_clk_hw_register(priv->dev, clk_hw);
546 	if (ret)
547 		return ERR_PTR(ret);
548 
549 	ret = rzg2l_register_notifier(clk_hw, core, priv);
550 	if (ret) {
551 		dev_err(priv->dev, "Failed to register notifier for %s\n",
552 			core->name);
553 		return ERR_PTR(ret);
554 	}
555 
556 	return clk_hw->clk;
557 }
558 
559 static unsigned long
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param * params,unsigned long rate)560 rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
561 			       unsigned long rate)
562 {
563 	unsigned long foutpostdiv_rate, foutvco_rate;
564 
565 	params->pl5_intin = rate / MEGA;
566 	params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
567 	params->pl5_refdiv = 2;
568 	params->pl5_postdiv1 = 1;
569 	params->pl5_postdiv2 = 1;
570 	params->pl5_spread = 0x16;
571 
572 	foutvco_rate = div_u64(mul_u32_u32(EXTAL_FREQ_IN_MEGA_HZ * MEGA,
573 					   (params->pl5_intin << 24) + params->pl5_fracin),
574 			       params->pl5_refdiv) >> 24;
575 	foutpostdiv_rate = DIV_ROUND_CLOSEST_ULL(foutvco_rate,
576 						 params->pl5_postdiv1 * params->pl5_postdiv2);
577 
578 	return foutpostdiv_rate;
579 }
580 
581 struct dsi_div_hw_data {
582 	struct clk_hw hw;
583 	u32 conf;
584 	unsigned long rate;
585 	struct rzg2l_cpg_priv *priv;
586 };
587 
588 #define to_dsi_div_hw_data(_hw)	container_of(_hw, struct dsi_div_hw_data, hw)
589 
rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)590 static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
591 						   unsigned long parent_rate)
592 {
593 	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
594 	unsigned long rate = dsi_div->rate;
595 
596 	if (!rate)
597 		rate = parent_rate;
598 
599 	return rate;
600 }
601 
rzg2l_cpg_get_vclk_parent_rate(struct clk_hw * hw,unsigned long rate)602 static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
603 						    unsigned long rate)
604 {
605 	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
606 	struct rzg2l_cpg_priv *priv = dsi_div->priv;
607 	struct rzg2l_pll5_param params;
608 	unsigned long parent_rate;
609 
610 	parent_rate = rzg2l_cpg_get_foutpostdiv_rate(&params, rate);
611 
612 	if (priv->mux_dsi_div_params.clksrc)
613 		parent_rate /= 2;
614 
615 	return parent_rate;
616 }
617 
rzg2l_cpg_dsi_div_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)618 static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
619 					    struct clk_rate_request *req)
620 {
621 	if (req->rate > MAX_VCLK_FREQ)
622 		req->rate = MAX_VCLK_FREQ;
623 
624 	req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);
625 
626 	return 0;
627 }
628 
rzg2l_cpg_dsi_div_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)629 static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
630 				      unsigned long rate,
631 				      unsigned long parent_rate)
632 {
633 	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
634 	struct rzg2l_cpg_priv *priv = dsi_div->priv;
635 
636 	/*
637 	 * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
638 	 *
639 	 * Based on the dot clock, the DSI divider clock sets the divider value,
640 	 * calculates the pll parameters for generating FOUTPOSTDIV and the clk
641 	 * source for the MUX and propagates that info to the parents.
642 	 */
643 
644 	if (!rate || rate > MAX_VCLK_FREQ)
645 		return -EINVAL;
646 
647 	dsi_div->rate = rate;
648 	writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
649 	       (priv->mux_dsi_div_params.dsi_div_a << 0) |
650 	       (priv->mux_dsi_div_params.dsi_div_b << 8),
651 	       priv->base + CPG_PL5_SDIV);
652 
653 	return 0;
654 }
655 
656 static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
657 	.recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
658 	.determine_rate = rzg2l_cpg_dsi_div_determine_rate,
659 	.set_rate = rzg2l_cpg_dsi_div_set_rate,
660 };
661 
662 static struct clk * __init
rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk * core,struct rzg2l_cpg_priv * priv)663 rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
664 			       struct rzg2l_cpg_priv *priv)
665 {
666 	struct dsi_div_hw_data *clk_hw_data;
667 	const struct clk *parent;
668 	const char *parent_name;
669 	struct clk_init_data init;
670 	struct clk_hw *clk_hw;
671 	int ret;
672 
673 	parent = priv->clks[core->parent];
674 	if (IS_ERR(parent))
675 		return ERR_CAST(parent);
676 
677 	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
678 	if (!clk_hw_data)
679 		return ERR_PTR(-ENOMEM);
680 
681 	clk_hw_data->priv = priv;
682 
683 	parent_name = __clk_get_name(parent);
684 	init.name = core->name;
685 	init.ops = &rzg2l_cpg_dsi_div_ops;
686 	init.flags = CLK_SET_RATE_PARENT;
687 	init.parent_names = &parent_name;
688 	init.num_parents = 1;
689 
690 	clk_hw = &clk_hw_data->hw;
691 	clk_hw->init = &init;
692 
693 	ret = devm_clk_hw_register(priv->dev, clk_hw);
694 	if (ret)
695 		return ERR_PTR(ret);
696 
697 	return clk_hw->clk;
698 }
699 
700 struct pll5_mux_hw_data {
701 	struct clk_hw hw;
702 	u32 conf;
703 	unsigned long rate;
704 	struct rzg2l_cpg_priv *priv;
705 };
706 
707 #define to_pll5_mux_hw_data(_hw)	container_of(_hw, struct pll5_mux_hw_data, hw)
708 
rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)709 static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
710 						   struct clk_rate_request *req)
711 {
712 	struct clk_hw *parent;
713 	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
714 	struct rzg2l_cpg_priv *priv = hwdata->priv;
715 
716 	parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
717 	req->best_parent_hw = parent;
718 	req->best_parent_rate = req->rate;
719 
720 	return 0;
721 }
722 
rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw * hw,u8 index)723 static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
724 {
725 	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
726 	struct rzg2l_cpg_priv *priv = hwdata->priv;
727 
728 	/*
729 	 * FOUTPOSTDIV--->|
730 	 *  |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
731 	 *  |--FOUT1PH0-->|
732 	 *
733 	 * Based on the dot clock, the DSI divider clock calculates the parent
734 	 * rate and clk source for the MUX. It propagates that info to
735 	 * pll5_4_clk_mux which sets the clock source for DSI divider clock.
736 	 */
737 
738 	writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
739 	       priv->base + CPG_OTHERFUNC1_REG);
740 
741 	return 0;
742 }
743 
rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw * hw)744 static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
745 {
746 	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
747 	struct rzg2l_cpg_priv *priv = hwdata->priv;
748 
749 	return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
750 }
751 
752 static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
753 	.determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
754 	.set_parent	= rzg2l_cpg_pll5_4_clk_mux_set_parent,
755 	.get_parent	= rzg2l_cpg_pll5_4_clk_mux_get_parent,
756 };
757 
758 static struct clk * __init
rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk * core,struct rzg2l_cpg_priv * priv)759 rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
760 				  struct rzg2l_cpg_priv *priv)
761 {
762 	struct pll5_mux_hw_data *clk_hw_data;
763 	struct clk_init_data init;
764 	struct clk_hw *clk_hw;
765 	int ret;
766 
767 	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
768 	if (!clk_hw_data)
769 		return ERR_PTR(-ENOMEM);
770 
771 	clk_hw_data->priv = priv;
772 	clk_hw_data->conf = core->conf;
773 
774 	init.name = core->name;
775 	init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
776 	init.flags = CLK_SET_RATE_PARENT;
777 	init.num_parents = core->num_parents;
778 	init.parent_names = core->parent_names;
779 
780 	clk_hw = &clk_hw_data->hw;
781 	clk_hw->init = &init;
782 
783 	ret = devm_clk_hw_register(priv->dev, clk_hw);
784 	if (ret)
785 		return ERR_PTR(ret);
786 
787 	return clk_hw->clk;
788 }
789 
790 struct sipll5 {
791 	struct clk_hw hw;
792 	u32 conf;
793 	unsigned long foutpostdiv_rate;
794 	struct rzg2l_cpg_priv *priv;
795 };
796 
797 #define to_sipll5(_hw)	container_of(_hw, struct sipll5, hw)
798 
rzg2l_cpg_get_vclk_rate(struct clk_hw * hw,unsigned long rate)799 static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
800 					     unsigned long rate)
801 {
802 	struct sipll5 *sipll5 = to_sipll5(hw);
803 	struct rzg2l_cpg_priv *priv = sipll5->priv;
804 	unsigned long vclk;
805 
806 	vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
807 		       (priv->mux_dsi_div_params.dsi_div_b + 1));
808 
809 	if (priv->mux_dsi_div_params.clksrc)
810 		vclk /= 2;
811 
812 	return vclk;
813 }
814 
rzg2l_cpg_sipll5_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)815 static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
816 						  unsigned long parent_rate)
817 {
818 	struct sipll5 *sipll5 = to_sipll5(hw);
819 	unsigned long pll5_rate = sipll5->foutpostdiv_rate;
820 
821 	if (!pll5_rate)
822 		pll5_rate = parent_rate;
823 
824 	return pll5_rate;
825 }
826 
rzg2l_cpg_sipll5_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)827 static int rzg2l_cpg_sipll5_determine_rate(struct clk_hw *hw,
828 					   struct clk_rate_request *req)
829 {
830 	return 0;
831 }
832 
rzg2l_cpg_sipll5_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)833 static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
834 				     unsigned long rate,
835 				     unsigned long parent_rate)
836 {
837 	struct sipll5 *sipll5 = to_sipll5(hw);
838 	struct rzg2l_cpg_priv *priv = sipll5->priv;
839 	struct rzg2l_pll5_param params;
840 	unsigned long vclk_rate;
841 	int ret;
842 	u32 val;
843 
844 	/*
845 	 *  OSC --> PLL5 --> FOUTPOSTDIV-->|
846 	 *                   |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
847 	 *                   |--FOUT1PH0-->|
848 	 *
849 	 * Based on the dot clock, the DSI divider clock calculates the parent
850 	 * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
851 	 * that info to sipll5 which sets parameters for generating FOUTPOSTDIV.
852 	 *
853 	 * OSC --> PLL5 --> FOUTPOSTDIV
854 	 */
855 
856 	if (!rate)
857 		return -EINVAL;
858 
859 	vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
860 	sipll5->foutpostdiv_rate =
861 		rzg2l_cpg_get_foutpostdiv_rate(&params, vclk_rate);
862 
863 	/* Put PLL5 into standby mode */
864 	writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
865 	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
866 				 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
867 	if (ret) {
868 		dev_err(priv->dev, "failed to release pll5 lock");
869 		return ret;
870 	}
871 
872 	/* Output clock setting 1 */
873 	writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
874 	       (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);
875 
876 	/* Output clock setting, SSCG modulation value setting 3 */
877 	writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);
878 
879 	/* Output clock setting 4 */
880 	writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
881 	       priv->base + CPG_SIPLL5_CLK4);
882 
883 	/* Output clock setting 5 */
884 	writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);
885 
886 	/* PLL normal mode setting */
887 	writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
888 	       CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
889 	       priv->base + CPG_SIPLL5_STBY);
890 
891 	/* PLL normal mode transition, output clock stability check */
892 	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
893 				 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
894 	if (ret) {
895 		dev_err(priv->dev, "failed to lock pll5");
896 		return ret;
897 	}
898 
899 	return 0;
900 }
901 
902 static const struct clk_ops rzg2l_cpg_sipll5_ops = {
903 	.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
904 	.determine_rate = rzg2l_cpg_sipll5_determine_rate,
905 	.set_rate = rzg2l_cpg_sipll5_set_rate,
906 };
907 
908 static struct clk * __init
rzg2l_cpg_sipll5_register(const struct cpg_core_clk * core,struct rzg2l_cpg_priv * priv)909 rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
910 			  struct rzg2l_cpg_priv *priv)
911 {
912 	const struct clk *parent;
913 	struct clk_init_data init;
914 	const char *parent_name;
915 	struct sipll5 *sipll5;
916 	struct clk_hw *clk_hw;
917 	int ret;
918 
919 	parent = priv->clks[core->parent];
920 	if (IS_ERR(parent))
921 		return ERR_CAST(parent);
922 
923 	sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
924 	if (!sipll5)
925 		return ERR_PTR(-ENOMEM);
926 
927 	init.name = core->name;
928 	parent_name = __clk_get_name(parent);
929 	init.ops = &rzg2l_cpg_sipll5_ops;
930 	init.flags = 0;
931 	init.parent_names = &parent_name;
932 	init.num_parents = 1;
933 
934 	sipll5->hw.init = &init;
935 	sipll5->conf = core->conf;
936 	sipll5->priv = priv;
937 
938 	writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
939 	       CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);
940 
941 	clk_hw = &sipll5->hw;
942 	clk_hw->init = &init;
943 
944 	ret = devm_clk_hw_register(priv->dev, clk_hw);
945 	if (ret)
946 		return ERR_PTR(ret);
947 
948 	priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
949 	priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
950 	priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */
951 
952 	return clk_hw->clk;
953 }
954 
955 struct pll_clk {
956 	struct clk_hw hw;
957 	unsigned long default_rate;
958 	unsigned int conf;
959 	unsigned int type;
960 	void __iomem *base;
961 	struct rzg2l_cpg_priv *priv;
962 };
963 
964 #define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)
965 
rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)966 static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
967 						   unsigned long parent_rate)
968 {
969 	struct pll_clk *pll_clk = to_pll(hw);
970 	struct rzg2l_cpg_priv *priv = pll_clk->priv;
971 	unsigned int val1, val2;
972 	u64 rate;
973 
974 	if (pll_clk->type != CLK_TYPE_SAM_PLL)
975 		return parent_rate;
976 
977 	val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
978 	val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));
979 
980 	rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
981 			       16 + SDIV(val2));
982 
983 	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
984 }
985 
986 static const struct clk_ops rzg2l_cpg_pll_ops = {
987 	.recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
988 };
989 
rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)990 static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
991 						   unsigned long parent_rate)
992 {
993 	struct pll_clk *pll_clk = to_pll(hw);
994 	struct rzg2l_cpg_priv *priv = pll_clk->priv;
995 	u32 nir, nfr, mr, pr, val, setting;
996 	u64 rate;
997 
998 	if (pll_clk->type != CLK_TYPE_G3S_PLL)
999 		return parent_rate;
1000 
1001 	setting = GET_REG_SAMPLL_SETTING(pll_clk->conf);
1002 	if (setting) {
1003 		val = readl(priv->base + setting);
1004 		if (val & RZG3S_SEL_PLL)
1005 			return pll_clk->default_rate;
1006 	}
1007 
1008 	val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
1009 
1010 	pr = 1 << FIELD_GET(RZG3S_DIV_P, val);
1011 	/* Hardware interprets values higher than 8 as p = 16. */
1012 	if (pr > 8)
1013 		pr = 16;
1014 
1015 	mr  = FIELD_GET(RZG3S_DIV_M, val) + 1;
1016 	nir = FIELD_GET(RZG3S_DIV_NI, val) + 1;
1017 	nfr = FIELD_GET(RZG3S_DIV_NF, val);
1018 
1019 	rate = mul_u64_u32_shr(parent_rate, 4096 * nir + nfr, 12);
1020 
1021 	return DIV_ROUND_CLOSEST_ULL(rate, (mr * pr));
1022 }
1023 
1024 static const struct clk_ops rzg3s_cpg_pll_ops = {
1025 	.recalc_rate = rzg3s_cpg_pll_clk_recalc_rate,
1026 };
1027 
1028 static struct clk * __init
rzg2l_cpg_pll_clk_register(const struct cpg_core_clk * core,struct rzg2l_cpg_priv * priv,const struct clk_ops * ops)1029 rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
1030 			   struct rzg2l_cpg_priv *priv,
1031 			   const struct clk_ops *ops)
1032 {
1033 	struct device *dev = priv->dev;
1034 	const struct clk *parent;
1035 	struct clk_init_data init;
1036 	const char *parent_name;
1037 	struct pll_clk *pll_clk;
1038 	int ret;
1039 
1040 	parent = priv->clks[core->parent];
1041 	if (IS_ERR(parent))
1042 		return ERR_CAST(parent);
1043 
1044 	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
1045 	if (!pll_clk)
1046 		return ERR_PTR(-ENOMEM);
1047 
1048 	parent_name = __clk_get_name(parent);
1049 	init.name = core->name;
1050 	init.ops = ops;
1051 	init.flags = 0;
1052 	init.parent_names = &parent_name;
1053 	init.num_parents = 1;
1054 
1055 	pll_clk->hw.init = &init;
1056 	pll_clk->conf = core->conf;
1057 	pll_clk->base = priv->base;
1058 	pll_clk->priv = priv;
1059 	pll_clk->type = core->type;
1060 	pll_clk->default_rate = core->default_rate;
1061 
1062 	ret = devm_clk_hw_register(dev, &pll_clk->hw);
1063 	if (ret)
1064 		return ERR_PTR(ret);
1065 
1066 	return pll_clk->hw.clk;
1067 }
1068 
1069 static struct clk
rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args * clkspec,void * data)1070 *rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
1071 			       void *data)
1072 {
1073 	unsigned int clkidx = clkspec->args[1];
1074 	struct rzg2l_cpg_priv *priv = data;
1075 	struct device *dev = priv->dev;
1076 	const char *type;
1077 	struct clk *clk;
1078 
1079 	switch (clkspec->args[0]) {
1080 	case CPG_CORE:
1081 		type = "core";
1082 		if (clkidx > priv->last_dt_core_clk) {
1083 			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
1084 			return ERR_PTR(-EINVAL);
1085 		}
1086 		clk = priv->clks[clkidx];
1087 		break;
1088 
1089 	case CPG_MOD:
1090 		type = "module";
1091 		if (clkidx >= priv->num_mod_clks) {
1092 			dev_err(dev, "Invalid %s clock index %u\n", type,
1093 				clkidx);
1094 			return ERR_PTR(-EINVAL);
1095 		}
1096 		clk = priv->clks[priv->num_core_clks + clkidx];
1097 		break;
1098 
1099 	default:
1100 		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
1101 		return ERR_PTR(-EINVAL);
1102 	}
1103 
1104 	if (IS_ERR(clk))
1105 		dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
1106 			PTR_ERR(clk));
1107 	else
1108 		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
1109 			clkspec->args[0], clkspec->args[1], clk,
1110 			clk_get_rate(clk));
1111 	return clk;
1112 }
1113 
1114 static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk * core,const struct rzg2l_cpg_info * info,struct rzg2l_cpg_priv * priv)1115 rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
1116 			    const struct rzg2l_cpg_info *info,
1117 			    struct rzg2l_cpg_priv *priv)
1118 {
1119 	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
1120 	struct device *dev = priv->dev;
1121 	unsigned int id = core->id, div = core->div;
1122 	const char *parent_name;
1123 	struct clk_hw *clk_hw;
1124 
1125 	WARN_DEBUG(id >= priv->num_core_clks);
1126 	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
1127 
1128 	switch (core->type) {
1129 	case CLK_TYPE_IN:
1130 		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
1131 		break;
1132 	case CLK_TYPE_FF:
1133 		WARN_DEBUG(core->parent >= priv->num_core_clks);
1134 		parent = priv->clks[core->parent];
1135 		if (IS_ERR(parent)) {
1136 			clk = parent;
1137 			goto fail;
1138 		}
1139 
1140 		parent_name = __clk_get_name(parent);
1141 		clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name, parent_name,
1142 							   CLK_SET_RATE_PARENT,
1143 							   core->mult, div);
1144 		if (IS_ERR(clk_hw))
1145 			clk = ERR_CAST(clk_hw);
1146 		else
1147 			clk = clk_hw->clk;
1148 		break;
1149 	case CLK_TYPE_SAM_PLL:
1150 		clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg2l_cpg_pll_ops);
1151 		break;
1152 	case CLK_TYPE_G3S_PLL:
1153 		clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg3s_cpg_pll_ops);
1154 		break;
1155 	case CLK_TYPE_SIPLL5:
1156 		clk = rzg2l_cpg_sipll5_register(core, priv);
1157 		break;
1158 	case CLK_TYPE_DIV:
1159 		clk = rzg2l_cpg_div_clk_register(core, priv);
1160 		break;
1161 	case CLK_TYPE_G3S_DIV:
1162 		clk = rzg3s_cpg_div_clk_register(core, priv);
1163 		break;
1164 	case CLK_TYPE_MUX:
1165 		clk = rzg2l_cpg_mux_clk_register(core, priv);
1166 		break;
1167 	case CLK_TYPE_SD_MUX:
1168 		clk = rzg2l_cpg_sd_mux_clk_register(core, priv);
1169 		break;
1170 	case CLK_TYPE_PLL5_4_MUX:
1171 		clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
1172 		break;
1173 	case CLK_TYPE_DSI_DIV:
1174 		clk = rzg2l_cpg_dsi_div_clk_register(core, priv);
1175 		break;
1176 	default:
1177 		goto fail;
1178 	}
1179 
1180 	if (IS_ERR_OR_NULL(clk))
1181 		goto fail;
1182 
1183 	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
1184 	priv->clks[id] = clk;
1185 	return;
1186 
1187 fail:
1188 	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
1189 		core->name, PTR_ERR(clk));
1190 }
1191 
1192 /**
1193  * struct mstop - MSTOP specific data structure
1194  * @usecnt: Usage counter for MSTOP settings (when zero the settings
1195  *          are applied to register)
1196  * @conf: MSTOP configuration (register offset, setup bits)
1197  */
1198 struct mstop {
1199 	atomic_t usecnt;
1200 	u32 conf;
1201 };
1202 
1203 /**
1204  * struct mod_clock - Module clock
1205  *
1206  * @hw: handle between common and hardware-specific interfaces
1207  * @priv: CPG/MSTP private data
1208  * @sibling: pointer to the other coupled clock
1209  * @mstop: MSTOP configuration
1210  * @shared_mstop_clks: clocks sharing the MSTOP with this clock
1211  * @off: register offset
1212  * @bit: ON/MON bit
1213  * @num_shared_mstop_clks: number of the clocks sharing MSTOP with this clock
1214  * @enabled: soft state of the clock, if it is coupled with another clock
1215  */
1216 struct mod_clock {
1217 	struct clk_hw hw;
1218 	struct rzg2l_cpg_priv *priv;
1219 	struct mod_clock *sibling;
1220 	struct mstop *mstop;
1221 	struct mod_clock **shared_mstop_clks;
1222 	u16 off;
1223 	u8 bit;
1224 	u8 num_shared_mstop_clks;
1225 	bool enabled;
1226 };
1227 
1228 #define to_mod_clock(_hw) container_of(_hw, struct mod_clock, hw)
1229 
1230 #define for_each_mod_clock(mod_clock, hw, priv) \
1231 	for (unsigned int i = 0; (priv) && i < (priv)->num_mod_clks; i++) \
1232 		if ((priv)->clks[(priv)->num_core_clks + i] == ERR_PTR(-ENOENT)) \
1233 			continue; \
1234 		else if (((hw) = __clk_get_hw((priv)->clks[(priv)->num_core_clks + i])) && \
1235 			 ((mod_clock) = to_mod_clock(hw)))
1236 
1237 /* Need to be called with a lock held to avoid concurrent access to mstop->usecnt. */
rzg2l_mod_clock_module_set_state(struct mod_clock * clock,bool standby)1238 static void rzg2l_mod_clock_module_set_state(struct mod_clock *clock,
1239 					     bool standby)
1240 {
1241 	struct rzg2l_cpg_priv *priv = clock->priv;
1242 	struct mstop *mstop = clock->mstop;
1243 	bool update = false;
1244 	u32 value;
1245 
1246 	if (!mstop)
1247 		return;
1248 
1249 	value = MSTOP_MASK(mstop->conf) << 16;
1250 
1251 	if (standby) {
1252 		unsigned int criticals = 0;
1253 
1254 		for (unsigned int i = 0; i < clock->num_shared_mstop_clks; i++) {
1255 			struct mod_clock *clk = clock->shared_mstop_clks[i];
1256 
1257 			if (clk_hw_get_flags(&clk->hw) & CLK_IS_CRITICAL)
1258 				criticals++;
1259 		}
1260 
1261 		if (!clock->num_shared_mstop_clks &&
1262 		    clk_hw_get_flags(&clock->hw) & CLK_IS_CRITICAL)
1263 			criticals++;
1264 
1265 		/*
1266 		 * If this is a shared MSTOP and it is shared with critical clocks,
1267 		 * and the system boots up with this clock enabled but no driver
1268 		 * uses it the CCF will disable it (as it is unused). As we don't
1269 		 * increment reference counter for it at registration (to avoid
1270 		 * messing with clocks enabled at probe but later used by drivers)
1271 		 * do not set the MSTOP here too if it is shared with critical
1272 		 * clocks and ref counted only by those critical clocks.
1273 		 */
1274 		if (criticals && criticals == atomic_read(&mstop->usecnt))
1275 			return;
1276 
1277 		value |= MSTOP_MASK(mstop->conf);
1278 
1279 		/* Allow updates on probe when usecnt = 0. */
1280 		if (!atomic_read(&mstop->usecnt))
1281 			update = true;
1282 		else
1283 			update = atomic_dec_and_test(&mstop->usecnt);
1284 	} else {
1285 		if (!atomic_read(&mstop->usecnt))
1286 			update = true;
1287 		atomic_inc(&mstop->usecnt);
1288 	}
1289 
1290 	if (update)
1291 		writel(value, priv->base + MSTOP_OFF(mstop->conf));
1292 }
1293 
rzg2l_mod_clock_mstop_show(struct seq_file * s,void * what)1294 static int rzg2l_mod_clock_mstop_show(struct seq_file *s, void *what)
1295 {
1296 	struct rzg2l_cpg_priv *priv = s->private;
1297 	struct mod_clock *clk;
1298 	struct clk_hw *hw;
1299 
1300 	seq_printf(s, "%-20s %-5s %-10s\n", "", "", "MSTOP");
1301 	seq_printf(s, "%-20s %-5s %-10s\n", "", "clk", "-------------------------");
1302 	seq_printf(s, "%-20s %-5s %-5s %-5s %-6s %-6s\n",
1303 		   "clk_name", "cnt", "cnt", "off", "val", "shared");
1304 	seq_printf(s, "%-20s %-5s %-5s %-5s %-6s %-6s\n",
1305 		   "--------", "-----", "-----", "-----", "------", "------");
1306 
1307 	for_each_mod_clock(clk, hw, priv) {
1308 		u32 val;
1309 
1310 		if (!clk->mstop)
1311 			continue;
1312 
1313 		val = readl(priv->base + MSTOP_OFF(clk->mstop->conf)) &
1314 		      MSTOP_MASK(clk->mstop->conf);
1315 
1316 		seq_printf(s, "%-20s %-5d %-5d 0x%-3lx 0x%-4x", clk_hw_get_name(hw),
1317 			   __clk_get_enable_count(hw->clk), atomic_read(&clk->mstop->usecnt),
1318 			   MSTOP_OFF(clk->mstop->conf), val);
1319 
1320 		for (unsigned int i = 0; i < clk->num_shared_mstop_clks; i++)
1321 			seq_printf(s, " %pC", clk->shared_mstop_clks[i]->hw.clk);
1322 
1323 		seq_puts(s, "\n");
1324 	}
1325 
1326 	return 0;
1327 }
1328 DEFINE_SHOW_ATTRIBUTE(rzg2l_mod_clock_mstop);
1329 
rzg2l_mod_clock_endisable(struct clk_hw * hw,bool enable)1330 static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
1331 {
1332 	struct mod_clock *clock = to_mod_clock(hw);
1333 	struct rzg2l_cpg_priv *priv = clock->priv;
1334 	unsigned int reg = clock->off;
1335 	struct device *dev = priv->dev;
1336 	u32 bitmask = BIT(clock->bit);
1337 	u32 value;
1338 	int error;
1339 
1340 	if (!clock->off) {
1341 		dev_dbg(dev, "%pC does not support ON/OFF\n",  hw->clk);
1342 		return 0;
1343 	}
1344 
1345 	dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(reg), hw->clk,
1346 		str_on_off(enable));
1347 
1348 	value = bitmask << 16;
1349 	if (enable)
1350 		value |= bitmask;
1351 
1352 	scoped_guard(spinlock_irqsave, &priv->rmw_lock) {
1353 		if (enable) {
1354 			writel(value, priv->base + CLK_ON_R(reg));
1355 			rzg2l_mod_clock_module_set_state(clock, false);
1356 		} else {
1357 			rzg2l_mod_clock_module_set_state(clock, true);
1358 			writel(value, priv->base + CLK_ON_R(reg));
1359 		}
1360 	}
1361 
1362 	if (!enable)
1363 		return 0;
1364 
1365 	if (!priv->info->has_clk_mon_regs)
1366 		return 0;
1367 
1368 	error = readl_poll_timeout_atomic(priv->base + CLK_MON_R(reg), value,
1369 					  value & bitmask, 0, 10);
1370 	if (error)
1371 		dev_err(dev, "Failed to enable CLK_ON 0x%x/%pC\n",
1372 			CLK_ON_R(reg), hw->clk);
1373 
1374 	return error;
1375 }
1376 
rzg2l_mod_clock_enable(struct clk_hw * hw)1377 static int rzg2l_mod_clock_enable(struct clk_hw *hw)
1378 {
1379 	struct mod_clock *clock = to_mod_clock(hw);
1380 
1381 	if (clock->sibling) {
1382 		struct rzg2l_cpg_priv *priv = clock->priv;
1383 		unsigned long flags;
1384 		bool enabled;
1385 
1386 		spin_lock_irqsave(&priv->rmw_lock, flags);
1387 		enabled = clock->sibling->enabled;
1388 		clock->enabled = true;
1389 		spin_unlock_irqrestore(&priv->rmw_lock, flags);
1390 		if (enabled)
1391 			return 0;
1392 	}
1393 
1394 	return rzg2l_mod_clock_endisable(hw, true);
1395 }
1396 
rzg2l_mod_clock_disable(struct clk_hw * hw)1397 static void rzg2l_mod_clock_disable(struct clk_hw *hw)
1398 {
1399 	struct mod_clock *clock = to_mod_clock(hw);
1400 
1401 	if (clock->sibling) {
1402 		struct rzg2l_cpg_priv *priv = clock->priv;
1403 		unsigned long flags;
1404 		bool enabled;
1405 
1406 		spin_lock_irqsave(&priv->rmw_lock, flags);
1407 		enabled = clock->sibling->enabled;
1408 		clock->enabled = false;
1409 		spin_unlock_irqrestore(&priv->rmw_lock, flags);
1410 		if (enabled)
1411 			return;
1412 	}
1413 
1414 	rzg2l_mod_clock_endisable(hw, false);
1415 }
1416 
rzg2l_mod_clock_is_enabled(struct clk_hw * hw)1417 static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
1418 {
1419 	struct mod_clock *clock = to_mod_clock(hw);
1420 	struct rzg2l_cpg_priv *priv = clock->priv;
1421 	u32 bitmask = BIT(clock->bit);
1422 	u32 value;
1423 
1424 	if (!clock->off) {
1425 		dev_dbg(priv->dev, "%pC does not support ON/OFF\n",  hw->clk);
1426 		return 1;
1427 	}
1428 
1429 	if (clock->sibling)
1430 		return clock->enabled;
1431 
1432 	if (priv->info->has_clk_mon_regs)
1433 		value = readl(priv->base + CLK_MON_R(clock->off));
1434 	else
1435 		value = readl(priv->base + clock->off);
1436 
1437 	return value & bitmask;
1438 }
1439 
1440 static const struct clk_ops rzg2l_mod_clock_ops = {
1441 	.enable = rzg2l_mod_clock_enable,
1442 	.disable = rzg2l_mod_clock_disable,
1443 	.is_enabled = rzg2l_mod_clock_is_enabled,
1444 };
1445 
1446 static struct mod_clock
rzg2l_mod_clock_get_sibling(struct mod_clock * clock,struct rzg2l_cpg_priv * priv)1447 *rzg2l_mod_clock_get_sibling(struct mod_clock *clock,
1448 			     struct rzg2l_cpg_priv *priv)
1449 {
1450 	struct mod_clock *clk;
1451 	struct clk_hw *hw;
1452 
1453 	for_each_mod_clock(clk, hw, priv) {
1454 		if (clock->off == clk->off && clock->bit == clk->bit)
1455 			return clk;
1456 	}
1457 
1458 	return NULL;
1459 }
1460 
rzg2l_mod_clock_get_mstop(struct rzg2l_cpg_priv * priv,u32 conf)1461 static struct mstop *rzg2l_mod_clock_get_mstop(struct rzg2l_cpg_priv *priv, u32 conf)
1462 {
1463 	struct mod_clock *clk;
1464 	struct clk_hw *hw;
1465 
1466 	for_each_mod_clock(clk, hw, priv) {
1467 		if (!clk->mstop)
1468 			continue;
1469 
1470 		if (clk->mstop->conf == conf)
1471 			return clk->mstop;
1472 	}
1473 
1474 	return NULL;
1475 }
1476 
rzg2l_mod_clock_init_mstop(struct rzg2l_cpg_priv * priv)1477 static void rzg2l_mod_clock_init_mstop(struct rzg2l_cpg_priv *priv)
1478 {
1479 	struct mod_clock *clk;
1480 	struct clk_hw *hw;
1481 
1482 	for_each_mod_clock(clk, hw, priv) {
1483 		if (!clk->mstop)
1484 			continue;
1485 
1486 		/*
1487 		 * Out of reset all modules are enabled. Set module state
1488 		 * in case associated clocks are disabled at probe. Otherwise
1489 		 * module is in invalid HW state.
1490 		 */
1491 		scoped_guard(spinlock_irqsave, &priv->rmw_lock) {
1492 			if (!rzg2l_mod_clock_is_enabled(&clk->hw))
1493 				rzg2l_mod_clock_module_set_state(clk, true);
1494 		}
1495 	}
1496 }
1497 
rzg2l_mod_clock_update_shared_mstop_clks(struct rzg2l_cpg_priv * priv,struct mod_clock * clock)1498 static int rzg2l_mod_clock_update_shared_mstop_clks(struct rzg2l_cpg_priv *priv,
1499 						    struct mod_clock *clock)
1500 {
1501 	struct mod_clock *clk;
1502 	struct clk_hw *hw;
1503 
1504 	if (!clock->mstop)
1505 		return 0;
1506 
1507 	for_each_mod_clock(clk, hw, priv) {
1508 		int num_shared_mstop_clks, incr = 1;
1509 		struct mod_clock **new_clks;
1510 
1511 		if (clk->mstop != clock->mstop)
1512 			continue;
1513 
1514 		num_shared_mstop_clks = clk->num_shared_mstop_clks;
1515 		if (!num_shared_mstop_clks)
1516 			incr++;
1517 
1518 		new_clks = devm_krealloc(priv->dev, clk->shared_mstop_clks,
1519 					 (num_shared_mstop_clks + incr) * sizeof(*new_clks),
1520 					 GFP_KERNEL);
1521 		if (!new_clks)
1522 			return -ENOMEM;
1523 
1524 		if (!num_shared_mstop_clks)
1525 			new_clks[num_shared_mstop_clks++] = clk;
1526 		new_clks[num_shared_mstop_clks++] = clock;
1527 
1528 		for (unsigned int i = 0; i < num_shared_mstop_clks; i++) {
1529 			new_clks[i]->shared_mstop_clks = new_clks;
1530 			new_clks[i]->num_shared_mstop_clks = num_shared_mstop_clks;
1531 		}
1532 		break;
1533 	}
1534 
1535 	return 0;
1536 }
1537 
1538 static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk * mod,const struct rzg2l_cpg_info * info,struct rzg2l_cpg_priv * priv)1539 rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
1540 			   const struct rzg2l_cpg_info *info,
1541 			   struct rzg2l_cpg_priv *priv)
1542 {
1543 	struct mod_clock *clock = NULL;
1544 	struct device *dev = priv->dev;
1545 	unsigned int id = mod->id;
1546 	struct clk_init_data init;
1547 	struct clk *parent, *clk;
1548 	const char *parent_name;
1549 	unsigned int i;
1550 	int ret;
1551 
1552 	WARN_DEBUG(id < priv->num_core_clks);
1553 	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
1554 	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
1555 	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
1556 
1557 	parent = priv->clks[mod->parent];
1558 	if (IS_ERR(parent)) {
1559 		clk = parent;
1560 		goto fail;
1561 	}
1562 
1563 	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
1564 	if (!clock) {
1565 		clk = ERR_PTR(-ENOMEM);
1566 		goto fail;
1567 	}
1568 
1569 	init.name = mod->name;
1570 	init.ops = &rzg2l_mod_clock_ops;
1571 	init.flags = CLK_SET_RATE_PARENT;
1572 	for (i = 0; i < info->num_crit_mod_clks; i++)
1573 		if (id == info->crit_mod_clks[i]) {
1574 			dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
1575 				mod->name);
1576 			init.flags |= CLK_IS_CRITICAL;
1577 			break;
1578 		}
1579 
1580 	parent_name = __clk_get_name(parent);
1581 	init.parent_names = &parent_name;
1582 	init.num_parents = 1;
1583 
1584 	clock->off = mod->off;
1585 	clock->bit = mod->bit;
1586 	clock->priv = priv;
1587 	clock->hw.init = &init;
1588 
1589 	if (mod->mstop_conf) {
1590 		struct mstop *mstop = rzg2l_mod_clock_get_mstop(priv, mod->mstop_conf);
1591 
1592 		if (!mstop) {
1593 			mstop = devm_kzalloc(dev, sizeof(*mstop), GFP_KERNEL);
1594 			if (!mstop) {
1595 				clk = ERR_PTR(-ENOMEM);
1596 				goto fail;
1597 			}
1598 			mstop->conf = mod->mstop_conf;
1599 			atomic_set(&mstop->usecnt, 0);
1600 		}
1601 		clock->mstop = mstop;
1602 	}
1603 
1604 	ret = devm_clk_hw_register(dev, &clock->hw);
1605 	if (ret) {
1606 		clk = ERR_PTR(ret);
1607 		goto fail;
1608 	}
1609 
1610 	if (mod->is_coupled) {
1611 		struct mod_clock *sibling;
1612 
1613 		clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
1614 		sibling = rzg2l_mod_clock_get_sibling(clock, priv);
1615 		if (sibling) {
1616 			clock->sibling = sibling;
1617 			sibling->sibling = clock;
1618 		}
1619 	}
1620 
1621 	/* Keep this before priv->clks[id] is updated. */
1622 	ret = rzg2l_mod_clock_update_shared_mstop_clks(priv, clock);
1623 	if (ret) {
1624 		clk = ERR_PTR(ret);
1625 		goto fail;
1626 	}
1627 
1628 	clk = clock->hw.clk;
1629 	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
1630 	priv->clks[id] = clk;
1631 
1632 	return;
1633 
1634 fail:
1635 	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
1636 		mod->name, PTR_ERR(clk));
1637 }
1638 
1639 #define rcdev_to_priv(x)	container_of(x, struct rzg2l_cpg_priv, rcdev)
1640 
__rzg2l_cpg_assert(struct reset_controller_dev * rcdev,unsigned long id,bool assert)1641 static int __rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
1642 			      unsigned long id, bool assert)
1643 {
1644 	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1645 	const struct rzg2l_cpg_info *info = priv->info;
1646 	unsigned int reg = info->resets[id].off;
1647 	u32 mask = BIT(info->resets[id].bit);
1648 	s8 monbit = info->resets[id].monbit;
1649 	u32 value = mask << 16;
1650 	int ret;
1651 
1652 	dev_dbg(rcdev->dev, "%s id:%ld offset:0x%x\n",
1653 		assert ? "assert" : "deassert", id, CLK_RST_R(reg));
1654 
1655 	if (!assert)
1656 		value |= mask;
1657 	writel(value, priv->base + CLK_RST_R(reg));
1658 
1659 	if (info->has_clk_mon_regs) {
1660 		reg = CLK_MRST_R(reg);
1661 	} else if (monbit >= 0) {
1662 		reg = CPG_RST_MON;
1663 		mask = BIT(monbit);
1664 	} else {
1665 		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
1666 		udelay(35);
1667 		return 0;
1668 	}
1669 
1670 	ret = readl_poll_timeout_atomic(priv->base + reg, value,
1671 					assert == !!(value & mask), 10, 200);
1672 	if (ret && !assert) {
1673 		value = mask << 16;
1674 		writel(value, priv->base + CLK_RST_R(info->resets[id].off));
1675 	}
1676 
1677 	return ret;
1678 }
1679 
rzg2l_cpg_assert(struct reset_controller_dev * rcdev,unsigned long id)1680 static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
1681 			    unsigned long id)
1682 {
1683 	return __rzg2l_cpg_assert(rcdev, id, true);
1684 }
1685 
rzg2l_cpg_deassert(struct reset_controller_dev * rcdev,unsigned long id)1686 static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
1687 			      unsigned long id)
1688 {
1689 	return __rzg2l_cpg_assert(rcdev, id, false);
1690 }
1691 
rzg2l_cpg_reset(struct reset_controller_dev * rcdev,unsigned long id)1692 static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
1693 			   unsigned long id)
1694 {
1695 	int ret;
1696 
1697 	ret = rzg2l_cpg_assert(rcdev, id);
1698 	if (ret)
1699 		return ret;
1700 
1701 	return rzg2l_cpg_deassert(rcdev, id);
1702 }
1703 
rzg2l_cpg_status(struct reset_controller_dev * rcdev,unsigned long id)1704 static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
1705 			    unsigned long id)
1706 {
1707 	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1708 	const struct rzg2l_cpg_info *info = priv->info;
1709 	s8 monbit = info->resets[id].monbit;
1710 	unsigned int reg;
1711 	u32 bitmask;
1712 
1713 	if (info->has_clk_mon_regs) {
1714 		reg = CLK_MRST_R(info->resets[id].off);
1715 		bitmask = BIT(info->resets[id].bit);
1716 	} else if (monbit >= 0) {
1717 		reg = CPG_RST_MON;
1718 		bitmask = BIT(monbit);
1719 	} else {
1720 		return -ENOTSUPP;
1721 	}
1722 
1723 	return !!(readl(priv->base + reg) & bitmask);
1724 }
1725 
1726 static const struct reset_control_ops rzg2l_cpg_reset_ops = {
1727 	.reset = rzg2l_cpg_reset,
1728 	.assert = rzg2l_cpg_assert,
1729 	.deassert = rzg2l_cpg_deassert,
1730 	.status = rzg2l_cpg_status,
1731 };
1732 
rzg2l_cpg_reset_xlate(struct reset_controller_dev * rcdev,const struct of_phandle_args * reset_spec)1733 static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
1734 				 const struct of_phandle_args *reset_spec)
1735 {
1736 	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1737 	const struct rzg2l_cpg_info *info = priv->info;
1738 	unsigned int id = reset_spec->args[0];
1739 
1740 	if (id >= rcdev->nr_resets || !info->resets[id].off) {
1741 		dev_err(rcdev->dev, "Invalid reset index %u\n", id);
1742 		return -EINVAL;
1743 	}
1744 
1745 	return id;
1746 }
1747 
rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv * priv)1748 static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
1749 {
1750 	priv->rcdev.ops = &rzg2l_cpg_reset_ops;
1751 	priv->rcdev.of_node = priv->dev->of_node;
1752 	priv->rcdev.dev = priv->dev;
1753 	priv->rcdev.of_reset_n_cells = 1;
1754 	priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
1755 	priv->rcdev.nr_resets = priv->num_resets;
1756 
1757 	return devm_reset_controller_register(priv->dev, &priv->rcdev);
1758 }
1759 
rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv * priv,const struct of_phandle_args * clkspec)1760 static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
1761 				const struct of_phandle_args *clkspec)
1762 {
1763 	if (clkspec->np != priv->genpd.dev.of_node || clkspec->args_count != 2)
1764 		return false;
1765 
1766 	switch (clkspec->args[0]) {
1767 	case CPG_MOD: {
1768 		const struct rzg2l_cpg_info *info = priv->info;
1769 		unsigned int id = clkspec->args[1];
1770 
1771 		if (id >= priv->num_mod_clks)
1772 			return false;
1773 
1774 		id += info->num_total_core_clks;
1775 
1776 		for (unsigned int i = 0; i < info->num_no_pm_mod_clks; i++) {
1777 			if (info->no_pm_mod_clks[i] == id)
1778 				return false;
1779 		}
1780 
1781 		return true;
1782 	}
1783 
1784 	case CPG_CORE:
1785 	default:
1786 		return false;
1787 	}
1788 }
1789 
rzg2l_cpg_attach_dev(struct generic_pm_domain * domain,struct device * dev)1790 static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
1791 {
1792 	struct rzg2l_cpg_priv *priv = container_of(domain, struct rzg2l_cpg_priv, genpd);
1793 	struct device_node *np = dev->of_node;
1794 	struct of_phandle_args clkspec;
1795 	bool once = true;
1796 	struct clk *clk;
1797 	unsigned int i;
1798 	int error;
1799 
1800 	for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
1801 		if (!rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
1802 			of_node_put(clkspec.np);
1803 			continue;
1804 		}
1805 
1806 		if (once) {
1807 			once = false;
1808 			error = pm_clk_create(dev);
1809 			if (error) {
1810 				of_node_put(clkspec.np);
1811 				goto err;
1812 			}
1813 		}
1814 		clk = of_clk_get_from_provider(&clkspec);
1815 		of_node_put(clkspec.np);
1816 		if (IS_ERR(clk)) {
1817 			error = PTR_ERR(clk);
1818 			goto fail_destroy;
1819 		}
1820 
1821 		error = pm_clk_add_clk(dev, clk);
1822 		if (error) {
1823 			dev_err(dev, "pm_clk_add_clk failed %d\n", error);
1824 			goto fail_put;
1825 		}
1826 	}
1827 
1828 	return 0;
1829 
1830 fail_put:
1831 	clk_put(clk);
1832 
1833 fail_destroy:
1834 	pm_clk_destroy(dev);
1835 err:
1836 	return error;
1837 }
1838 
rzg2l_cpg_detach_dev(struct generic_pm_domain * unused,struct device * dev)1839 static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
1840 {
1841 	if (!pm_clk_no_clocks(dev))
1842 		pm_clk_destroy(dev);
1843 }
1844 
rzg2l_cpg_genpd_remove(void * data)1845 static void rzg2l_cpg_genpd_remove(void *data)
1846 {
1847 	pm_genpd_remove(data);
1848 }
1849 
rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv * priv)1850 static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
1851 {
1852 	struct device *dev = priv->dev;
1853 	struct device_node *np = dev->of_node;
1854 	struct generic_pm_domain *genpd = &priv->genpd;
1855 	int ret;
1856 
1857 	genpd->name = np->name;
1858 	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
1859 		       GENPD_FLAG_ACTIVE_WAKEUP;
1860 	genpd->attach_dev = rzg2l_cpg_attach_dev;
1861 	genpd->detach_dev = rzg2l_cpg_detach_dev;
1862 	ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
1863 	if (ret)
1864 		return ret;
1865 
1866 	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);
1867 	if (ret)
1868 		return ret;
1869 
1870 	return of_genpd_add_provider_simple(np, genpd);
1871 }
1872 
rzg2l_cpg_probe(struct platform_device * pdev)1873 static int __init rzg2l_cpg_probe(struct platform_device *pdev)
1874 {
1875 	struct device *dev = &pdev->dev;
1876 	struct device_node *np = dev->of_node;
1877 	const struct rzg2l_cpg_info *info;
1878 	struct rzg2l_cpg_priv *priv;
1879 	unsigned int nclks, i;
1880 	struct clk **clks;
1881 	int error;
1882 
1883 	info = of_device_get_match_data(dev);
1884 
1885 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1886 	if (!priv)
1887 		return -ENOMEM;
1888 
1889 	priv->dev = dev;
1890 	priv->info = info;
1891 	spin_lock_init(&priv->rmw_lock);
1892 
1893 	priv->base = devm_platform_ioremap_resource(pdev, 0);
1894 	if (IS_ERR(priv->base))
1895 		return PTR_ERR(priv->base);
1896 
1897 	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
1898 	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
1899 	if (!clks)
1900 		return -ENOMEM;
1901 
1902 	dev_set_drvdata(dev, priv);
1903 	priv->clks = clks;
1904 	priv->num_core_clks = info->num_total_core_clks;
1905 	priv->num_mod_clks = info->num_hw_mod_clks;
1906 	priv->num_resets = info->num_resets;
1907 	priv->last_dt_core_clk = info->last_dt_core_clk;
1908 
1909 	for (i = 0; i < nclks; i++)
1910 		clks[i] = ERR_PTR(-ENOENT);
1911 
1912 	for (i = 0; i < info->num_core_clks; i++)
1913 		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);
1914 
1915 	for (i = 0; i < info->num_mod_clks; i++)
1916 		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);
1917 
1918 	/*
1919 	 * Initialize MSTOP after all the clocks were registered to avoid
1920 	 * invalid reference counting when multiple clocks (critical,
1921 	 * non-critical) share the same MSTOP.
1922 	 */
1923 	rzg2l_mod_clock_init_mstop(priv);
1924 
1925 	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
1926 	if (error)
1927 		return error;
1928 
1929 	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
1930 	if (error)
1931 		return error;
1932 
1933 	error = rzg2l_cpg_add_clk_domain(priv);
1934 	if (error)
1935 		return error;
1936 
1937 	error = rzg2l_cpg_reset_controller_register(priv);
1938 	if (error)
1939 		return error;
1940 
1941 	debugfs_create_file("mstop", 0444, NULL, priv, &rzg2l_mod_clock_mstop_fops);
1942 	return 0;
1943 }
1944 
rzg2l_cpg_resume(struct device * dev)1945 static int rzg2l_cpg_resume(struct device *dev)
1946 {
1947 	struct rzg2l_cpg_priv *priv = dev_get_drvdata(dev);
1948 
1949 	rzg2l_mod_clock_init_mstop(priv);
1950 
1951 	return 0;
1952 }
1953 
1954 static const struct dev_pm_ops rzg2l_cpg_pm_ops = {
1955 	NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, rzg2l_cpg_resume)
1956 };
1957 
1958 static const struct of_device_id rzg2l_cpg_match[] = {
1959 #ifdef CONFIG_CLK_R9A07G043
1960 	{
1961 		.compatible = "renesas,r9a07g043-cpg",
1962 		.data = &r9a07g043_cpg_info,
1963 	},
1964 #endif
1965 #ifdef CONFIG_CLK_R9A07G044
1966 	{
1967 		.compatible = "renesas,r9a07g044-cpg",
1968 		.data = &r9a07g044_cpg_info,
1969 	},
1970 #endif
1971 #ifdef CONFIG_CLK_R9A07G054
1972 	{
1973 		.compatible = "renesas,r9a07g054-cpg",
1974 		.data = &r9a07g054_cpg_info,
1975 	},
1976 #endif
1977 #ifdef CONFIG_CLK_R9A08G045
1978 	{
1979 		.compatible = "renesas,r9a08g045-cpg",
1980 		.data = &r9a08g045_cpg_info,
1981 	},
1982 #endif
1983 #ifdef CONFIG_CLK_R9A09G011
1984 	{
1985 		.compatible = "renesas,r9a09g011-cpg",
1986 		.data = &r9a09g011_cpg_info,
1987 	},
1988 #endif
1989 	{ /* sentinel */ }
1990 };
1991 
1992 static struct platform_driver rzg2l_cpg_driver = {
1993 	.driver		= {
1994 		.name	= "rzg2l-cpg",
1995 		.of_match_table = rzg2l_cpg_match,
1996 		.pm	= pm_sleep_ptr(&rzg2l_cpg_pm_ops),
1997 	},
1998 };
1999 
rzg2l_cpg_init(void)2000 static int __init rzg2l_cpg_init(void)
2001 {
2002 	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
2003 }
2004 
2005 subsys_initcall(rzg2l_cpg_init);
2006 
2007 MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");
2008