// SPDX-License-Identifier: GPL-2.0
/*
 * RZ/G2L Clock Pulse Generator
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 *
 * Based on renesas-cpg-mssr.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/string_choices.h>
#include <linux/units.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "rzg2l-cpg.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

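/*
 * Core clock configuration words pack the register offset in bits [31:20],
 * the field shift in bits [19:12] and the field width in bits [11:8], as
 * decoded by GET_REG_OFFSET(), GET_SHIFT() and GET_WIDTH() below.
 */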
#define GET_SHIFT(val)		((val >> 12) & 0xff)
#define GET_WIDTH(val)		((val >> 8) & 0xf)

#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), val))
#define MDIV(val)		FIELD_GET(GENMASK(15, 6), val)
#define PDIV(val)		FIELD_GET(GENMASK(5, 0), val)
#define SDIV(val)		FIELD_GET(GENMASK(2, 0), val)

#define RZG3S_DIV_P		GENMASK(28, 26)
#define RZG3S_DIV_M		GENMASK(25, 22)
#define RZG3S_DIV_NI		GENMASK(21, 13)
#define RZG3S_DIV_NF		GENMASK(12, 1)
#define RZG3S_SEL_PLL		BIT(0)

#define CLK_ON_R(reg)		(reg)
#define CLK_MON_R(reg)		(0x180 + (reg))
#define CLK_RST_R(reg)		(reg)
#define CLK_MRST_R(reg)		(0x180 + (reg))

#define GET_REG_OFFSET(val)		((val >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val)	((val >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val)	((val >> 12) & 0xfff)
#define GET_REG_SAMPLL_SETTING(val)	((val) & 0xfff)

#define CPG_WEN_BIT		BIT(16)

#define MAX_VCLK_FREQ		(148500000)

/**
 * struct clk_hw_data - clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (register offset, shift, width)
 * @sconf: clock status configuration (register offset, shift, width)
 * @priv: CPG private data structure
 */
struct clk_hw_data {
	struct clk_hw hw;
	u32 conf;
	u32 sconf;
	struct rzg2l_cpg_priv *priv;
};

#define to_clk_hw_data(_hw)	container_of(_hw, struct clk_hw_data, hw)

/**
 * struct sd_mux_hw_data - SD MUX clock hardware data
 * @hw_data: clock hw data
 * @mtable: clock mux table
 */
struct sd_mux_hw_data {
	struct clk_hw_data hw_data;
	const u32 *mtable;
};

#define to_sd_mux_hw_data(_hw)	container_of(_hw, struct sd_mux_hw_data, hw_data)

/**
 * struct div_hw_data - divider clock hardware data
 * @hw_data: clock hw data
 * @dtable: pointer to divider table
 * @invalid_rate: invalid rate for divider
 * @max_rate: maximum rate for divider
 * @width: divider width
 */
struct div_hw_data {
	struct clk_hw_data hw_data;
	const struct clk_div_table *dtable;
	unsigned long invalid_rate;
	unsigned long max_rate;
	u32 width;
};

#define to_div_hw_data(_hw)	container_of(_hw, struct div_hw_data, hw_data)

struct rzg2l_pll5_param {
	u32 pl5_fracin;
	u8 pl5_refdiv;
	u8 pl5_intin;
	u8 pl5_postdiv1;
	u8 pl5_postdiv2;
	u8 pl5_spread;
};

struct rzg2l_pll5_mux_dsi_div_param {
	u8 clksrc;
	u8 dsi_div_a;
	u8 dsi_div_b;
};

/**
 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
 *
 * @rcdev: Reset controller entity
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @info: Pointer to platform data
 * @mux_dsi_div_params: pll5 mux and dsi div parameters
 */
struct rzg2l_cpg_priv {
	struct reset_controller_dev rcdev;
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	const struct rzg2l_cpg_info *info;

	struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};

static void rzg2l_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

/* Must be called in atomic context. */
static int rzg2l_cpg_wait_clk_update_done(void __iomem *base, u32 conf)
{
	u32 bitmask = GENMASK(GET_WIDTH(conf) - 1, 0) << GET_SHIFT(conf);
	u32 off = GET_REG_OFFSET(conf);
	u32 val;

	return readl_poll_timeout_atomic(base + off, val, !(val & bitmask), 10, 200);
}

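/*
 * Clock notifier for the SD clock muxes: before any rate change, park the
 * mux on the 266 MHz source so the hardware never has to switch directly
 * between the 533 MHz and 400 MHz settings.
 */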
int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event,
				  void *data)
{
	struct clk_notifier_data *cnd = data;
	struct clk_hw *hw = __clk_get_hw(cnd->clk);
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	const u32 clk_src_266 = 3;
	unsigned long flags;
	int ret;

	if (event != PRE_RATE_CHANGE || (cnd->new_rate / MEGA == 266))
		return NOTIFY_DONE;

	spin_lock_irqsave(&priv->rmw_lock, flags);

	/*
	 * As per the HW manual, we should not directly switch from 533 MHz to
	 * 400 MHz and vice versa. To change the setting from 2'b01 (533 MHz)
	 * to 2'b10 (400 MHz) or vice versa, switch to 2'b11 (266 MHz) first,
	 * and then switch to the target setting (2'b01 (533 MHz) or 2'b10
	 * (400 MHz)).
	 * Writing a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
	 * switching register is prohibited.
	 * The clock mux has 3 input clocks (533 MHz, 400 MHz, and 266 MHz),
	 * and the index-to-value mapping is done by adding 1 to the index.
	 */

	writel((CPG_WEN_BIT | clk_src_266) << shift, priv->base + off);

	/* Wait for the update to complete. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "failed to switch to safe clk source\n");

	return notifier_from_errno(ret);
}

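/*
 * Clock notifier for the RZ/G3S SD/OCTA/SPI dividers: before a parent rate
 * change that would make the current divide-by-1 setting illegal, move the
 * hardware divider off the divide-by-1 setting first.
 */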
int rzg3s_cpg_div_clk_notifier(struct notifier_block *nb, unsigned long event,
			       void *data)
{
	struct clk_notifier_data *cnd = data;
	struct clk_hw *hw = __clk_get_hw(cnd->clk);
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	int ret = 0;
	u32 val;

	if (event != PRE_RATE_CHANGE || !div_hw_data->invalid_rate ||
	    div_hw_data->invalid_rate % cnd->new_rate)
		return NOTIFY_DONE;

	spin_lock_irqsave(&priv->rmw_lock, flags);

	val = readl(priv->base + off);
	val >>= shift;
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	/*
	 * The users of this notifier are subject to the following constraints:
	 * 1/ the SD div cannot be 1 (val == 0) if the parent rate is 800 MHz
	 * 2/ the OCTA/SPI div cannot be 1 (val == 0) if the parent rate is 400 MHz
	 * As SD has only one parent running at 800 MHz and OCTA/SPI have only
	 * one parent running at 400 MHz, the parent rate is already accounted
	 * for at the beginning of this function (by checking
	 * invalid_rate % new_rate). Now check the hardware divider and update
	 * it accordingly.
	 */
	if (!val) {
		writel((CPG_WEN_BIT | 1) << shift, priv->base + off);
		/* Wait for the update to complete. */
		ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
	}

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "Failed to downgrade the div\n");

	return notifier_from_errno(ret);
}

static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk *core,
				   struct rzg2l_cpg_priv *priv)
{
	struct notifier_block *nb;

	if (!core->notifier)
		return 0;

	nb = devm_kzalloc(priv->dev, sizeof(*nb), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = core->notifier;

	return clk_notifier_register(hw->clk, nb);
}

static unsigned long rzg3s_div_clk_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 val;

	val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
	val >>= GET_SHIFT(clk_hw_data->conf);
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	return divider_recalc_rate(hw, parent_rate, val, div_hw_data->dtable,
				   CLK_DIVIDER_ROUND_CLOSEST, div_hw_data->width);
}

static int rzg3s_div_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);

	if (div_hw_data->max_rate && req->rate > div_hw_data->max_rate)
		req->rate = div_hw_data->max_rate;

	return divider_determine_rate(hw, req, div_hw_data->dtable, div_hw_data->width,
				      CLK_DIVIDER_ROUND_CLOSEST);
}

static int rzg3s_div_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	u32 val;
	int ret;

	val = divider_get_val(rate, parent_rate, div_hw_data->dtable, div_hw_data->width,
			      CLK_DIVIDER_ROUND_CLOSEST);

	spin_lock_irqsave(&priv->rmw_lock, flags);
	writel((CPG_WEN_BIT | val) << shift, priv->base + off);
	/* Wait for the update to complete. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	return ret;
}

static const struct clk_ops rzg3s_div_clk_ops = {
	.recalc_rate = rzg3s_div_clk_recalc_rate,
	.determine_rate = rzg3s_div_clk_determine_rate,
	.set_rate = rzg3s_div_clk_set_rate,
};

static struct clk * __init
rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct rzg2l_cpg_priv *priv)
{
	struct div_hw_data *div_hw_data;
	struct clk_init_data init = {};
	const struct clk_div_table *clkt;
	struct clk_hw *clk_hw;
	const struct clk *parent;
	const char *parent_name;
	u32 max = 0;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	div_hw_data = devm_kzalloc(priv->dev, sizeof(*div_hw_data), GFP_KERNEL);
	if (!div_hw_data)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	init.flags = core->flag;
	init.ops = &rzg3s_div_clk_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	/* Get the maximum divider to retrieve div width. */
	for (clkt = core->dtable; clkt->div; clkt++) {
		if (max < clkt->div)
			max = clkt->div;
	}

	div_hw_data->hw_data.priv = priv;
	div_hw_data->hw_data.conf = core->conf;
	div_hw_data->hw_data.sconf = core->sconf;
	div_hw_data->dtable = core->dtable;
	div_hw_data->invalid_rate = core->invalid_rate;
	div_hw_data->max_rate = core->max_rate;
	div_hw_data->width = fls(max) - 1;

	clk_hw = &div_hw_data->hw_data.hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	ret = rzg2l_register_notifier(clk_hw, core, priv);
	if (ret) {
		dev_err(priv->dev, "Failed to register notifier for %s\n",
			core->name);
		return ERR_PTR(ret);
	}

	return clk_hw->clk;
}

static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv)
{
	void __iomem *base = priv->base;
	struct device *dev = priv->dev;
	const struct clk *parent;
	const char *parent_name;
	struct clk_hw *clk_hw;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	if (core->dtable)
		clk_hw = clk_hw_register_divider_table(dev, core->name,
						       parent_name, 0,
						       base + GET_REG_OFFSET(core->conf),
						       GET_SHIFT(core->conf),
						       GET_WIDTH(core->conf),
						       core->flag,
						       core->dtable,
						       &priv->rmw_lock);
	else
		clk_hw = clk_hw_register_divider(dev, core->name,
						 parent_name, 0,
						 base + GET_REG_OFFSET(core->conf),
						 GET_SHIFT(core->conf),
						 GET_WIDTH(core->conf),
						 core->flag, &priv->rmw_lock);

	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv)
{
	const struct clk_hw *clk_hw;

	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
					  core->parent_names, core->num_parents,
					  core->flag,
					  priv->base + GET_REG_OFFSET(core->conf),
					  GET_SHIFT(core->conf),
					  GET_WIDTH(core->conf),
					  core->mux_flags, &priv->rmw_lock);
	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	u32 val;
	int ret;

	val = clk_mux_index_to_val(sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, index);

	spin_lock_irqsave(&priv->rmw_lock, flags);

	writel((CPG_WEN_BIT | val) << shift, priv->base + off);

	/* Wait for the update to complete. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "Failed to switch parent\n");

	return ret;
}

static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 val;

	val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
	val >>= GET_SHIFT(clk_hw_data->conf);
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	return clk_mux_val_to_index(hw, sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, val);
}

static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
	.determine_rate = __clk_mux_determine_rate_closest,
	.set_parent	= rzg2l_cpg_sd_clk_mux_set_parent,
	.get_parent	= rzg2l_cpg_sd_clk_mux_get_parent,
};

static struct clk * __init
rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
			      struct rzg2l_cpg_priv *priv)
{
	struct sd_mux_hw_data *sd_mux_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	sd_mux_hw_data = devm_kzalloc(priv->dev, sizeof(*sd_mux_hw_data), GFP_KERNEL);
	if (!sd_mux_hw_data)
		return ERR_PTR(-ENOMEM);

	sd_mux_hw_data->hw_data.priv = priv;
	sd_mux_hw_data->hw_data.conf = core->conf;
	sd_mux_hw_data->hw_data.sconf = core->sconf;
	sd_mux_hw_data->mtable = core->mtable;

	init.name = core->name;
	init.ops = &rzg2l_cpg_sd_clk_mux_ops;
	init.flags = core->flag;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &sd_mux_hw_data->hw_data.hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	ret = rzg2l_register_notifier(clk_hw, core, priv);
	if (ret) {
		dev_err(priv->dev, "Failed to register notifier for %s\n",
			core->name);
		return ERR_PTR(ret);
	}

	return clk_hw->clk;
}

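/*
 * Derive the PLL5 parameters for a target FOUTPOSTDIV rate. The computation
 * below implements:
 *
 *   FOUTVCO = EXTAL * (pl5_intin + pl5_fracin / 2^24) / pl5_refdiv
 *   FOUTPOSTDIV = FOUTVCO / (pl5_postdiv1 * pl5_postdiv2)
 *
 * where the fractional part is carried in 24 fixed-point bits.
 */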
static unsigned long
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
			       unsigned long rate)
{
	unsigned long foutpostdiv_rate, foutvco_rate;

	params->pl5_intin = rate / MEGA;
	params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
	params->pl5_refdiv = 2;
	params->pl5_postdiv1 = 1;
	params->pl5_postdiv2 = 1;
	params->pl5_spread = 0x16;

	foutvco_rate = div_u64(mul_u32_u32(EXTAL_FREQ_IN_MEGA_HZ * MEGA,
					   (params->pl5_intin << 24) + params->pl5_fracin),
			       params->pl5_refdiv) >> 24;
	foutpostdiv_rate = DIV_ROUND_CLOSEST_ULL(foutvco_rate,
						 params->pl5_postdiv1 * params->pl5_postdiv2);

	return foutpostdiv_rate;
}

struct dsi_div_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_dsi_div_hw_data(_hw)	container_of(_hw, struct dsi_div_hw_data, hw)

static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	unsigned long rate = dsi_div->rate;

	if (!rate)
		rate = parent_rate;

	return rate;
}

static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
						    unsigned long rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;
	struct rzg2l_pll5_param params;
	unsigned long parent_rate;

	parent_rate = rzg2l_cpg_get_foutpostdiv_rate(&params, rate);

	if (priv->mux_dsi_div_params.clksrc)
		parent_rate /= 2;

	return parent_rate;
}

static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
					    struct clk_rate_request *req)
{
	if (req->rate > MAX_VCLK_FREQ)
		req->rate = MAX_VCLK_FREQ;

	req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);

	return 0;
}

static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
				      unsigned long rate,
				      unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;

	/*
	 * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
	 *
	 * Based on the dot clock, the DSI divider clock sets the divider
	 * value, calculates the PLL parameters for generating FOUTPOSTDIV
	 * and the clock source for the MUX, and propagates that information
	 * to the parents.
	 */

	if (!rate || rate > MAX_VCLK_FREQ)
		return -EINVAL;

	dsi_div->rate = rate;
	writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
	       (priv->mux_dsi_div_params.dsi_div_a << 0) |
	       (priv->mux_dsi_div_params.dsi_div_b << 8),
	       priv->base + CPG_PL5_SDIV);

	return 0;
}

static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
	.recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
	.determine_rate = rzg2l_cpg_dsi_div_determine_rate,
	.set_rate = rzg2l_cpg_dsi_div_set_rate,
};

static struct clk * __init
rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
			       struct rzg2l_cpg_priv *priv)
{
	struct dsi_div_hw_data *clk_hw_data;
	const struct clk *parent;
	const char *parent_name;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = &rzg2l_cpg_dsi_div_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

struct pll5_mux_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll5_mux_hw_data(_hw)	container_of(_hw, struct pll5_mux_hw_data, hw)

static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
						   struct clk_rate_request *req)
{
	struct clk_hw *parent;
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
	req->best_parent_hw = parent;
	req->best_parent_rate = req->rate;

	return 0;
}

static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	/*
	 * FOUTPOSTDIV--->|
	 *  |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *  |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and the clock source for the MUX. It propagates that
	 * information to pll5_4_clk_mux, which sets the clock source for the
	 * DSI divider clock.
	 */

	writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
	       priv->base + CPG_OTHERFUNC1_REG);

	return 0;
}

static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
}

static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
	.set_parent	= rzg2l_cpg_pll5_4_clk_mux_set_parent,
	.get_parent	= rzg2l_cpg_pll5_4_clk_mux_get_parent,
};

static struct clk * __init
rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
				  struct rzg2l_cpg_priv *priv)
{
	struct pll5_mux_hw_data *clk_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;
	clk_hw_data->conf = core->conf;

	init.name = core->name;
	init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

struct sipll5 {
	struct clk_hw hw;
	u32 conf;
	unsigned long foutpostdiv_rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_sipll5(_hw)	container_of(_hw, struct sipll5, hw)

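/*
 * VCLK is derived from the PLL5 output through the DSI dividers:
 *
 *   VCLK = rate / (2^dsi_div_a * (dsi_div_b + 1))
 *
 * halved once more when clock source 1 (FOUT1PH0) is selected.
 */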
static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
					     unsigned long rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	unsigned long vclk;

	vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
		       (priv->mux_dsi_div_params.dsi_div_b + 1));

	if (priv->mux_dsi_div_params.clksrc)
		vclk /= 2;

	return vclk;
}

static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	unsigned long pll5_rate = sipll5->foutpostdiv_rate;

	if (!pll5_rate)
		pll5_rate = parent_rate;

	return pll5_rate;
}

static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
					unsigned long rate,
					unsigned long *parent_rate)
{
	return rate;
}

static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
				     unsigned long rate,
				     unsigned long parent_rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	struct rzg2l_pll5_param params;
	unsigned long vclk_rate;
	int ret;
	u32 val;

	/*
	 *  OSC --> PLL5 --> FOUTPOSTDIV-->|
	 *                   |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *                   |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and the PLL5 parameters for generating FOUTPOSTDIV. It
	 * propagates that information to sipll5, which sets the parameters
	 * for generating FOUTPOSTDIV.
	 *
	 * OSC --> PLL5 --> FOUTPOSTDIV
	 */

	if (!rate)
		return -EINVAL;

	vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
	sipll5->foutpostdiv_rate =
		rzg2l_cpg_get_foutpostdiv_rate(&params, vclk_rate);

	/* Put PLL5 into standby mode */
	writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to release pll5 lock\n");
		return ret;
	}

	/* Output clock setting 1 */
	writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
	       (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);

	/* Output clock setting, SSCG modulation value setting 3 */
	writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);

	/* Output clock setting 4 */
	writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
	       priv->base + CPG_SIPLL5_CLK4);

	/* Output clock setting 5 */
	writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);

	/* PLL normal mode setting */
	writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
	       CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
	       priv->base + CPG_SIPLL5_STBY);

	/* PLL normal mode transition, output clock stability check */
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to lock pll5\n");
		return ret;
	}

	return 0;
}

static const struct clk_ops rzg2l_cpg_sipll5_ops = {
	.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
	.round_rate = rzg2l_cpg_sipll5_round_rate,
	.set_rate = rzg2l_cpg_sipll5_set_rate,
};

static struct clk * __init
rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
			  struct rzg2l_cpg_priv *priv)
{
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct sipll5 *sipll5;
	struct clk_hw *clk_hw;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
	if (!sipll5)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	parent_name = __clk_get_name(parent);
	init.ops = &rzg2l_cpg_sipll5_ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	sipll5->hw.init = &init;
	sipll5->conf = core->conf;
	sipll5->priv = priv;

	writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
	       CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);

	clk_hw = &sipll5->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
	priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
	priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */

	return clk_hw->clk;
}

struct pll_clk {
	struct clk_hw hw;
	unsigned long default_rate;
	unsigned int conf;
	unsigned int type;
	void __iomem *base;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)

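/*
 * The SAM PLL output is derived from the CLK1/CLK2 register fields as:
 *
 *   Fout = parent_rate * (MDIV + KDIV / 65536) / (PDIV * 2^SDIV)
 *
 * KDIV is signed, so the fractional part can also pull the rate down.
 */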
static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	unsigned int val1, val2;
	u64 rate;

	if (pll_clk->type != CLK_TYPE_SAM_PLL)
		return parent_rate;

	val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
	val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));

	rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
			       16 + SDIV(val2));

	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
}

static const struct clk_ops rzg2l_cpg_pll_ops = {
	.recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
};

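/*
 * The RZ/G3S PLL output is derived from the CLK1 register fields as:
 *
 *   Fout = parent_rate * (NI + NF / 4096) / (M * P)
 *
 * If the SETTING register reports SEL_PLL, the PLL runs at its static
 * default rate instead.
 */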
static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	u32 nir, nfr, mr, pr, val, setting;
	u64 rate;

	if (pll_clk->type != CLK_TYPE_G3S_PLL)
		return parent_rate;

	setting = GET_REG_SAMPLL_SETTING(pll_clk->conf);
	if (setting) {
		val = readl(priv->base + setting);
		if (val & RZG3S_SEL_PLL)
			return pll_clk->default_rate;
	}

	val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));

	pr = 1 << FIELD_GET(RZG3S_DIV_P, val);
	/* Hardware interprets values higher than 8 as p = 16. */
	if (pr > 8)
		pr = 16;

	mr  = FIELD_GET(RZG3S_DIV_M, val) + 1;
	nir = FIELD_GET(RZG3S_DIV_NI, val) + 1;
	nfr = FIELD_GET(RZG3S_DIV_NF, val);

	rate = mul_u64_u32_shr(parent_rate, 4096 * nir + nfr, 12);

	return DIV_ROUND_CLOSEST_ULL(rate, (mr * pr));
}

static const struct clk_ops rzg3s_cpg_pll_ops = {
	.recalc_rate = rzg3s_cpg_pll_clk_recalc_rate,
};

static struct clk * __init
rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv,
			   const struct clk_ops *ops)
{
	struct device *dev = priv->dev;
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct pll_clk *pll_clk;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->conf = core->conf;
	pll_clk->base = priv->base;
	pll_clk->priv = priv;
	pll_clk->type = core->type;
	pll_clk->default_rate = core->default_rate;

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret)
		return ERR_PTR(ret);

	return pll_clk->hw.clk;
}

static struct clk
*rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
			       void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct rzg2l_cpg_priv *priv = data;
	struct device *dev = priv->dev;
	const char *type;
	struct clk *clk;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + clkidx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld\n", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}

static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
			    const struct rzg2l_cpg_info *info,
			    struct rzg2l_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;
	struct clk_hw *clk_hw;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name, parent_name,
							   CLK_SET_RATE_PARENT,
							   core->mult, div);
		if (IS_ERR(clk_hw))
			clk = ERR_CAST(clk_hw);
		else
			clk = clk_hw->clk;
		break;
	case CLK_TYPE_SAM_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg2l_cpg_pll_ops);
		break;
	case CLK_TYPE_G3S_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg3s_cpg_pll_ops);
		break;
	case CLK_TYPE_SIPLL5:
		clk = rzg2l_cpg_sipll5_register(core, priv);
		break;
	case CLK_TYPE_DIV:
		clk = rzg2l_cpg_div_clk_register(core, priv);
		break;
	case CLK_TYPE_G3S_DIV:
		clk = rzg3s_cpg_div_clk_register(core, priv);
		break;
	case CLK_TYPE_MUX:
		clk = rzg2l_cpg_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_SD_MUX:
		clk = rzg2l_cpg_sd_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_PLL5_4_MUX:
		clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_DSI_DIV:
		clk = rzg2l_cpg_dsi_div_clk_register(core, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}

/**
 * struct mstp_clock - MSTP gating clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @off: register offset
 * @bit: ON/MON bit
 * @enabled: soft state of the clock, if it is coupled with another clock
 * @priv: CPG/MSTP private data
 * @sibling: pointer to the other coupled clock
 */
struct mstp_clock {
	struct clk_hw hw;
	u16 off;
	u8 bit;
	bool enabled;
	struct rzg2l_cpg_priv *priv;
	struct mstp_clock *sibling;
};

#define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)

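/*
 * CLK_ON registers take the enable bits in the lower 16 bits and a matching
 * write-enable mask in the upper 16 bits, so a single 32-bit write updates
 * only the selected module clock.
 */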
static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	unsigned int reg = clock->off;
	struct device *dev = priv->dev;
	u32 bitmask = BIT(clock->bit);
	u32 value;
	int error;

	if (!clock->off) {
		dev_dbg(dev, "%pC does not support ON/OFF\n", hw->clk);
		return 0;
	}

	dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(reg), hw->clk,
		str_on_off(enable));

	value = bitmask << 16;
	if (enable)
		value |= bitmask;

	writel(value, priv->base + CLK_ON_R(reg));

	if (!enable)
		return 0;

	if (!priv->info->has_clk_mon_regs)
		return 0;

	error = readl_poll_timeout_atomic(priv->base + CLK_MON_R(reg), value,
					  value & bitmask, 0, 10);
	if (error)
		dev_err(dev, "Failed to enable CLK_ON 0x%x/%pC\n",
			CLK_ON_R(reg), hw->clk);

	return error;
}

static int rzg2l_mod_clock_enable(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;
		unsigned long flags;
		bool enabled;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = true;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
		if (enabled)
			return 0;
	}

	return rzg2l_mod_clock_endisable(hw, true);
}

static void rzg2l_mod_clock_disable(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;
		unsigned long flags;
		bool enabled;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = false;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
		if (enabled)
			return;
	}

	rzg2l_mod_clock_endisable(hw, false);
}

static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	u32 bitmask = BIT(clock->bit);
	u32 value;

	if (!clock->off) {
		dev_dbg(priv->dev, "%pC does not support ON/OFF\n", hw->clk);
		return 1;
	}

	if (clock->sibling)
		return clock->enabled;

	if (priv->info->has_clk_mon_regs)
		value = readl(priv->base + CLK_MON_R(clock->off));
	else
		value = readl(priv->base + clock->off);

	return value & bitmask;
}

static const struct clk_ops rzg2l_mod_clock_ops = {
	.enable = rzg2l_mod_clock_enable,
	.disable = rzg2l_mod_clock_disable,
	.is_enabled = rzg2l_mod_clock_is_enabled,
};

static struct mstp_clock
*rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
			     struct rzg2l_cpg_priv *priv)
{
	struct clk_hw *hw;
	unsigned int i;

	for (i = 0; i < priv->num_mod_clks; i++) {
		struct mstp_clock *clk;

		if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
			continue;

		hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
		clk = to_mod_clock(hw);
		if (clock->off == clk->off && clock->bit == clk->bit)
			return clk;
	}

	return NULL;
}

static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
			   const struct rzg2l_cpg_info *info,
			   struct rzg2l_cpg_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;
	int ret;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzg2l_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->off = mod->off;
	clock->bit = mod->bit;
	clock->priv = priv;
	clock->hw.init = &init;

	ret = devm_clk_hw_register(dev, &clock->hw);
	if (ret) {
		clk = ERR_PTR(ret);
		goto fail;
	}

	clk = clock->hw.clk;
	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;

	if (mod->is_coupled) {
		struct mstp_clock *sibling;

		clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
		sibling = rzg2l_mod_clock_get_sibling(clock, priv);
		if (sibling) {
			clock->sibling = sibling;
			sibling->sibling = clock;
		}
	}

	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
}

#define rcdev_to_priv(x)	container_of(x, struct rzg2l_cpg_priv, rcdev)

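/*
 * Reset registers follow the same write-enable layout as the clock ON
 * registers: bit n drives the reset line and bit (n + 16) must be set for
 * the write to take effect. Completion is confirmed either through the
 * per-register monitor (CLK_MRST) or the shared CPG_RST_MON register; when
 * neither is available, wait one RCLK cycle (ca. 32 kHz) instead.
 */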
static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 mask = BIT(info->resets[id].bit);
	s8 monbit = info->resets[id].monbit;
	u32 value = mask << 16;

	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(reg);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		mask = BIT(monbit);
	} else {
		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
		udelay(35);
		return 0;
	}

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 value & mask, 10, 200);
}

static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 mask = BIT(info->resets[id].bit);
	s8 monbit = info->resets[id].monbit;
	u32 value = (mask << 16) | mask;

	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
		CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(reg);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		mask = BIT(monbit);
	} else {
		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
		udelay(35);
		return 0;
	}

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 !(value & mask), 10, 200);
}

static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	int ret;

	ret = rzg2l_cpg_assert(rcdev, id);
	if (ret)
		return ret;

	return rzg2l_cpg_deassert(rcdev, id);
}

static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	s8 monbit = info->resets[id].monbit;
	unsigned int reg;
	u32 bitmask;

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(info->resets[id].off);
		bitmask = BIT(info->resets[id].bit);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		bitmask = BIT(monbit);
	} else {
		return -ENOTSUPP;
	}

	return !!(readl(priv->base + reg) & bitmask);
}

static const struct reset_control_ops rzg2l_cpg_reset_ops = {
	.reset = rzg2l_cpg_reset,
	.assert = rzg2l_cpg_assert,
	.deassert = rzg2l_cpg_deassert,
	.status = rzg2l_cpg_status,
};

static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int id = reset_spec->args[0];

	if (id >= rcdev->nr_resets || !info->resets[id].off) {
		dev_err(rcdev->dev, "Invalid reset index %u\n", id);
		return -EINVAL;
	}

	return id;
}

static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
{
	priv->rcdev.ops = &rzg2l_cpg_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.dev = priv->dev;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
	priv->rcdev.nr_resets = priv->num_resets;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

/**
 * struct rzg2l_cpg_pm_domains - RZ/G2L PM domains data structure
 * @onecell_data: cell data
 * @domains: generic PM domains
 */
struct rzg2l_cpg_pm_domains {
	struct genpd_onecell_data onecell_data;
	struct generic_pm_domain *domains[];
};

/**
 * struct rzg2l_cpg_pd - RZ/G2L power domain data structure
 * @genpd: generic PM domain
 * @priv: pointer to CPG private data structure
 * @conf: CPG PM domain configuration info
 * @id: RZ/G2L power domain ID
 */
struct rzg2l_cpg_pd {
	struct generic_pm_domain genpd;
	struct rzg2l_cpg_priv *priv;
	struct rzg2l_cpg_pm_domain_conf conf;
	u16 id;
};

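/*
 * A clock qualifies as a PM clock for a domain when it is a module clock
 * of this CPG instance and is not listed in info->no_pm_mod_clks[].
 */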
static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_pd *pd,
				const struct of_phandle_args *clkspec)
{
	if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
		return false;

	switch (clkspec->args[0]) {
	case CPG_MOD: {
		struct rzg2l_cpg_priv *priv = pd->priv;
		const struct rzg2l_cpg_info *info = priv->info;
		unsigned int id = clkspec->args[1];

		if (id >= priv->num_mod_clks)
			return false;

		id += info->num_total_core_clks;

		for (unsigned int i = 0; i < info->num_no_pm_mod_clks; i++) {
			if (info->no_pm_mod_clks[i] == id)
				return false;
		}

		return true;
	}

	case CPG_CORE:
	default:
		return false;
	}
}

static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	unsigned int i;
	int error;

	for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
		if (!rzg2l_cpg_is_pm_clk(pd, &clkspec)) {
			of_node_put(clkspec.np);
			continue;
		}

		if (once) {
			once = false;
			error = pm_clk_create(dev);
			if (error) {
				of_node_put(clkspec.np);
				goto err;
			}
		}
		clk = of_clk_get_from_provider(&clkspec);
		of_node_put(clkspec.np);
		if (IS_ERR(clk)) {
			error = PTR_ERR(clk);
			goto fail_destroy;
		}

		error = pm_clk_add_clk(dev, clk);
		if (error) {
			dev_err(dev, "pm_clk_add_clk failed %d\n", error);
			goto fail_put;
		}
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}

static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static void rzg2l_cpg_genpd_remove(void *data)
{
	struct genpd_onecell_data *celldata = data;

	for (unsigned int i = 0; i < celldata->num_domains; i++)
		pm_genpd_remove(celldata->domains[i]);
}

static void rzg2l_cpg_genpd_remove_simple(void *data)
{
	pm_genpd_remove(data);
}

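/*
 * MSTOP registers use the same upper-16-bit write-enable pattern: writing
 * only the mask into the top half clears the stop bits (module bus access
 * enabled), while writing mask | (mask << 16) sets them.
 */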
static int rzg2l_cpg_power_on(struct generic_pm_domain *domain)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
	struct rzg2l_cpg_priv *priv = pd->priv;

	/* Set MSTOP. */
	if (mstop.mask)
		writel(mstop.mask << 16, priv->base + mstop.off);

	return 0;
}

static int rzg2l_cpg_power_off(struct generic_pm_domain *domain)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
	struct rzg2l_cpg_priv *priv = pd->priv;

	/* Set MSTOP. */
	if (mstop.mask)
		writel(mstop.mask | (mstop.mask << 16), priv->base + mstop.off);

	return 0;
}

static int __init rzg2l_cpg_pd_setup(struct rzg2l_cpg_pd *pd)
{
	bool always_on = !!(pd->genpd.flags & GENPD_FLAG_ALWAYS_ON);
	struct dev_power_governor *governor;
	int ret;

	if (always_on)
		governor = &pm_domain_always_on_gov;
	else
		governor = &simple_qos_governor;

	pd->genpd.flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
	pd->genpd.attach_dev = rzg2l_cpg_attach_dev;
	pd->genpd.detach_dev = rzg2l_cpg_detach_dev;
	pd->genpd.power_on = rzg2l_cpg_power_on;
	pd->genpd.power_off = rzg2l_cpg_power_off;

	ret = pm_genpd_init(&pd->genpd, governor, !always_on);
	if (ret)
		return ret;

	if (always_on)
		ret = rzg2l_cpg_power_on(&pd->genpd);

	return ret;
}

static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzg2l_cpg_pd *pd;
	int ret;

	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->genpd.name = np->name;
	pd->genpd.flags = GENPD_FLAG_ALWAYS_ON;
	pd->priv = priv;
	ret = rzg2l_cpg_pd_setup(pd);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove_simple, &pd->genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, &pd->genpd);
}

static struct generic_pm_domain *
rzg2l_cpg_pm_domain_xlate(const struct of_phandle_args *spec, void *data)
{
	struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
	struct genpd_onecell_data *genpd = data;

	if (spec->args_count != 1)
		return ERR_PTR(-EINVAL);

	for (unsigned int i = 0; i < genpd->num_domains; i++) {
		struct rzg2l_cpg_pd *pd = container_of(genpd->domains[i], struct rzg2l_cpg_pd,
						       genpd);

		if (pd->id == spec->args[0]) {
			domain = &pd->genpd;
			break;
		}
	}

	return domain;
}

static int __init rzg2l_cpg_add_pm_domains(struct rzg2l_cpg_priv *priv)
{
	const struct rzg2l_cpg_info *info = priv->info;
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzg2l_cpg_pm_domains *domains;
	struct generic_pm_domain *parent;
	u32 ncells;
	int ret;

	ret = of_property_read_u32(np, "#power-domain-cells", &ncells);
	if (ret)
		return ret;

	/* For backward compatibility. */
	if (!ncells)
		return rzg2l_cpg_add_clk_domain(priv);

	domains = devm_kzalloc(dev, struct_size(domains, domains, info->num_pm_domains),
			       GFP_KERNEL);
	if (!domains)
		return -ENOMEM;

	domains->onecell_data.domains = domains->domains;
	domains->onecell_data.num_domains = info->num_pm_domains;
	domains->onecell_data.xlate = rzg2l_cpg_pm_domain_xlate;

	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, &domains->onecell_data);
	if (ret)
		return ret;

	for (unsigned int i = 0; i < info->num_pm_domains; i++) {
		struct rzg2l_cpg_pd *pd;

		pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
		if (!pd)
			return -ENOMEM;

		pd->genpd.name = info->pm_domains[i].name;
		pd->genpd.flags = info->pm_domains[i].genpd_flags;
		pd->conf = info->pm_domains[i].conf;
		pd->id = info->pm_domains[i].id;
		pd->priv = priv;

		ret = rzg2l_cpg_pd_setup(pd);
		if (ret)
			return ret;

		domains->domains[i] = &pd->genpd;
		/* Parent should be on the very first entry of info->pm_domains[]. */
		if (!i) {
			parent = &pd->genpd;
			continue;
		}

		ret = pm_genpd_add_subdomain(parent, &pd->genpd);
		if (ret)
			return ret;
	}

	ret = of_genpd_add_provider_onecell(np, &domains->onecell_data);
	if (ret)
		return ret;

	return 0;
}

static int __init rzg2l_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzg2l_cpg_info *info;
	struct rzg2l_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->info = info;
	spin_lock_init(&priv->rmw_lock);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->num_resets = info->num_resets;
	priv->last_dt_core_clk = info->last_dt_core_clk;

	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);

	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzg2l_cpg_add_pm_domains(priv);
	if (error)
		return error;

	error = rzg2l_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}

static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
	{
		.compatible = "renesas,r9a07g043-cpg",
		.data = &r9a07g043_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G044
	{
		.compatible = "renesas,r9a07g044-cpg",
		.data = &r9a07g044_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G054
	{
		.compatible = "renesas,r9a07g054-cpg",
		.data = &r9a07g054_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A08G045
	{
		.compatible = "renesas,r9a08g045-cpg",
		.data = &r9a08g045_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G011
	{
		.compatible = "renesas,r9a09g011-cpg",
		.data = &r9a09g011_cpg_info,
	},
#endif
	{ /* sentinel */ }
};

static struct platform_driver rzg2l_cpg_driver = {
	.driver		= {
		.name	= "rzg2l-cpg",
		.of_match_table = rzg2l_cpg_match,
	},
};

static int __init rzg2l_cpg_init(void)
{
	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
}

subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");