// SPDX-License-Identifier: GPL-2.0
/*
 * RZ/G2L Clock Pulse Generator
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 *
 * Based on renesas-cpg-mssr.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/units.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "rzg2l-cpg.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

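/*
 * Clock configuration (conf/sconf) words pack the register offset, field
 * shift and field width into a single u32: offset in bits [31:20] (see
 * GET_REG_OFFSET()), shift in bits [19:12] and width in bits [11:8].
 */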
#define GET_SHIFT(val)		((val >> 12) & 0xff)
#define GET_WIDTH(val)		((val >> 8) & 0xf)

#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), val))
#define MDIV(val)		FIELD_GET(GENMASK(15, 6), val)
#define PDIV(val)		FIELD_GET(GENMASK(5, 0), val)
#define SDIV(val)		FIELD_GET(GENMASK(2, 0), val)

#define RZG3S_DIV_P		GENMASK(28, 26)
#define RZG3S_DIV_M		GENMASK(25, 22)
#define RZG3S_DIV_NI		GENMASK(21, 13)
#define RZG3S_DIV_NF		GENMASK(12, 1)

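/* Monitor registers live 0x180 above the corresponding control registers. */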
#define CLK_ON_R(reg)		(reg)
#define CLK_MON_R(reg)		(0x180 + (reg))
#define CLK_RST_R(reg)		(reg)
#define CLK_MRST_R(reg)		(0x180 + (reg))

#define GET_REG_OFFSET(val)		((val >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val)	((val >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val)	((val >> 12) & 0xfff)

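/* The write-enable bit sits 16 bits above the value field it guards. */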
#define CPG_WEN_BIT		BIT(16)

#define MAX_VCLK_FREQ		(148500000)

/**
 * struct clk_hw_data - clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (register offset, shift, width)
 * @sconf: clock status configuration (register offset, shift, width)
 * @priv: CPG private data structure
 */
struct clk_hw_data {
	struct clk_hw hw;
	u32 conf;
	u32 sconf;
	struct rzg2l_cpg_priv *priv;
};

#define to_clk_hw_data(_hw)	container_of(_hw, struct clk_hw_data, hw)

/**
 * struct sd_mux_hw_data - SD MUX clock hardware data
 * @hw_data: clock hw data
 * @mtable: clock mux table
 */
struct sd_mux_hw_data {
	struct clk_hw_data hw_data;
	const u32 *mtable;
};

#define to_sd_mux_hw_data(_hw)	container_of(_hw, struct sd_mux_hw_data, hw_data)

/**
 * struct div_hw_data - divider clock hardware data
 * @hw_data: clock hw data
 * @dtable: pointer to divider table
 * @invalid_rate: invalid rate for divider
 * @max_rate: maximum rate for divider
 * @width: divider width
 */
struct div_hw_data {
	struct clk_hw_data hw_data;
	const struct clk_div_table *dtable;
	unsigned long invalid_rate;
	unsigned long max_rate;
	u32 width;
};

#define to_div_hw_data(_hw)	container_of(_hw, struct div_hw_data, hw_data)

struct rzg2l_pll5_param {
	u32 pl5_fracin;
	u8 pl5_refdiv;
	u8 pl5_intin;
	u8 pl5_postdiv1;
	u8 pl5_postdiv2;
	u8 pl5_spread;
};

struct rzg2l_pll5_mux_dsi_div_param {
	u8 clksrc;
	u8 dsi_div_a;
	u8 dsi_div_b;
};

/**
 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
 *
 * @rcdev: Reset controller entity
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @info: Pointer to platform data
 * @mux_dsi_div_params: pll5 mux and dsi div parameters
 */
struct rzg2l_cpg_priv {
	struct reset_controller_dev rcdev;
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	const struct rzg2l_cpg_info *info;

	struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};

static void rzg2l_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

/* Must be called in atomic context. */
static int rzg2l_cpg_wait_clk_update_done(void __iomem *base, u32 conf)
{
	u32 bitmask = GENMASK(GET_WIDTH(conf) - 1, 0) << GET_SHIFT(conf);
	u32 off = GET_REG_OFFSET(conf);
	u32 val;

	return readl_poll_timeout_atomic(base + off, val, !(val & bitmask), 10, 200);
}

int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event,
				  void *data)
{
	struct clk_notifier_data *cnd = data;
	struct clk_hw *hw = __clk_get_hw(cnd->clk);
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	const u32 clk_src_266 = 3;
	unsigned long flags;
	int ret;

	if (event != PRE_RATE_CHANGE || (cnd->new_rate / MEGA == 266))
		return NOTIFY_DONE;

	spin_lock_irqsave(&priv->rmw_lock, flags);

	/*
	 * As per the HW manual, we should not directly switch from 533 MHz to
	 * 400 MHz and vice versa. To change the setting from 2'b01 (533 MHz)
	 * to 2'b10 (400 MHz) or vice versa, switch to 2'b11 (266 MHz) first,
	 * and then switch to the target setting (2'b01 (533 MHz) or 2'b10
	 * (400 MHz)).
	 * Writing a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
	 * switching register is prohibited.
	 * The clock mux has 3 input clocks (533 MHz, 400 MHz, and 266 MHz);
	 * the index-to-value mapping is done by adding 1 to the index.
	 */

	writel((CPG_WEN_BIT | clk_src_266) << shift, priv->base + off);

	/* Wait for the update done. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "failed to switch to safe clk source\n");

	return notifier_from_errno(ret);
}

int rzg3s_cpg_div_clk_notifier(struct notifier_block *nb, unsigned long event,
			       void *data)
{
	struct clk_notifier_data *cnd = data;
	struct clk_hw *hw = __clk_get_hw(cnd->clk);
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	int ret = 0;
	u32 val;

	if (event != PRE_RATE_CHANGE || !div_hw_data->invalid_rate ||
	    div_hw_data->invalid_rate % cnd->new_rate)
		return NOTIFY_DONE;

	spin_lock_irqsave(&priv->rmw_lock, flags);

	val = readl(priv->base + off);
	val >>= shift;
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	/*
	 * The users of this notifier have the following constraints:
	 * 1/ the SD div cannot be 1 (val == 0) if the parent rate is 800 MHz
	 * 2/ the OCTA/SPI div cannot be 1 (val == 0) if the parent rate is 400 MHz
	 * As SD has only one 800 MHz parent and OCTA/SPI have only one 400 MHz
	 * parent, the parent rate was already taken into account at the
	 * beginning of this function (by checking invalid_rate % new_rate).
	 * Now check the hardware divider and update it accordingly.
	 */
	if (!val) {
		writel((CPG_WEN_BIT | 1) << shift, priv->base + off);
		/* Wait for the update done. */
		ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
	}

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "Failed to downgrade the div\n");

	return notifier_from_errno(ret);
}

static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk *core,
				   struct rzg2l_cpg_priv *priv)
{
	struct notifier_block *nb;

	if (!core->notifier)
		return 0;

	nb = devm_kzalloc(priv->dev, sizeof(*nb), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = core->notifier;

	return clk_notifier_register(hw->clk, nb);
}

static unsigned long rzg3s_div_clk_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 val;

	val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
	val >>= GET_SHIFT(clk_hw_data->conf);
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	return divider_recalc_rate(hw, parent_rate, val, div_hw_data->dtable,
				   CLK_DIVIDER_ROUND_CLOSEST, div_hw_data->width);
}

static int rzg3s_div_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);

	if (div_hw_data->max_rate && req->rate > div_hw_data->max_rate)
		req->rate = div_hw_data->max_rate;

	return divider_determine_rate(hw, req, div_hw_data->dtable, div_hw_data->width,
				      CLK_DIVIDER_ROUND_CLOSEST);
}

static int rzg3s_div_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	u32 val;
	int ret;

	val = divider_get_val(rate, parent_rate, div_hw_data->dtable, div_hw_data->width,
			      CLK_DIVIDER_ROUND_CLOSEST);

	spin_lock_irqsave(&priv->rmw_lock, flags);
	writel((CPG_WEN_BIT | val) << shift, priv->base + off);
	/* Wait for the update done. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	return ret;
}

static const struct clk_ops rzg3s_div_clk_ops = {
	.recalc_rate = rzg3s_div_clk_recalc_rate,
	.determine_rate = rzg3s_div_clk_determine_rate,
	.set_rate = rzg3s_div_clk_set_rate,
};

static struct clk * __init
rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct rzg2l_cpg_priv *priv)
{
	struct div_hw_data *div_hw_data;
	struct clk_init_data init = {};
	const struct clk_div_table *clkt;
	struct clk_hw *clk_hw;
	const struct clk *parent;
	const char *parent_name;
	u32 max = 0;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	div_hw_data = devm_kzalloc(priv->dev, sizeof(*div_hw_data), GFP_KERNEL);
	if (!div_hw_data)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	init.flags = core->flag;
	init.ops = &rzg3s_div_clk_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	/* Get the maximum divider to retrieve div width. */
	for (clkt = core->dtable; clkt->div; clkt++) {
		if (max < clkt->div)
			max = clkt->div;
	}

	div_hw_data->hw_data.priv = priv;
	div_hw_data->hw_data.conf = core->conf;
	div_hw_data->hw_data.sconf = core->sconf;
	div_hw_data->dtable = core->dtable;
	div_hw_data->invalid_rate = core->invalid_rate;
	div_hw_data->max_rate = core->max_rate;
	div_hw_data->width = fls(max) - 1;

	clk_hw = &div_hw_data->hw_data.hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	ret = rzg2l_register_notifier(clk_hw, core, priv);
	if (ret) {
		dev_err(priv->dev, "Failed to register notifier for %s\n",
			core->name);
		return ERR_PTR(ret);
	}

	return clk_hw->clk;
}

static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv)
{
	void __iomem *base = priv->base;
	struct device *dev = priv->dev;
	const struct clk *parent;
	const char *parent_name;
	struct clk_hw *clk_hw;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	if (core->dtable)
		clk_hw = clk_hw_register_divider_table(dev, core->name,
						       parent_name, 0,
						       base + GET_REG_OFFSET(core->conf),
						       GET_SHIFT(core->conf),
						       GET_WIDTH(core->conf),
						       core->flag,
						       core->dtable,
						       &priv->rmw_lock);
	else
		clk_hw = clk_hw_register_divider(dev, core->name,
						 parent_name, 0,
						 base + GET_REG_OFFSET(core->conf),
						 GET_SHIFT(core->conf),
						 GET_WIDTH(core->conf),
						 core->flag, &priv->rmw_lock);

	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv)
{
	const struct clk_hw *clk_hw;

	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
					  core->parent_names, core->num_parents,
					  core->flag,
					  priv->base + GET_REG_OFFSET(core->conf),
					  GET_SHIFT(core->conf),
					  GET_WIDTH(core->conf),
					  core->mux_flags, &priv->rmw_lock);
	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	u32 val;
	int ret;

	val = clk_mux_index_to_val(sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, index);

	spin_lock_irqsave(&priv->rmw_lock, flags);

	writel((CPG_WEN_BIT | val) << shift, priv->base + off);

	/* Wait for the update done. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "Failed to switch parent\n");

	return ret;
}

static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 val;

	val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
	val >>= GET_SHIFT(clk_hw_data->conf);
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	return clk_mux_val_to_index(hw, sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, val);
}

static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
	.determine_rate = __clk_mux_determine_rate_closest,
	.set_parent	= rzg2l_cpg_sd_clk_mux_set_parent,
	.get_parent	= rzg2l_cpg_sd_clk_mux_get_parent,
};

static struct clk * __init
rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
			      struct rzg2l_cpg_priv *priv)
{
	struct sd_mux_hw_data *sd_mux_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	sd_mux_hw_data = devm_kzalloc(priv->dev, sizeof(*sd_mux_hw_data), GFP_KERNEL);
	if (!sd_mux_hw_data)
		return ERR_PTR(-ENOMEM);

	sd_mux_hw_data->hw_data.priv = priv;
	sd_mux_hw_data->hw_data.conf = core->conf;
	sd_mux_hw_data->hw_data.sconf = core->sconf;
	sd_mux_hw_data->mtable = core->mtable;

	init.name = core->name;
	init.ops = &rzg2l_cpg_sd_clk_mux_ops;
	init.flags = core->flag;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &sd_mux_hw_data->hw_data.hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	ret = rzg2l_register_notifier(clk_hw, core, priv);
	if (ret) {
		dev_err(priv->dev, "Failed to register notifier for %s\n",
			core->name);
		return ERR_PTR(ret);
	}

	return clk_hw->clk;
}

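/*
 * Compute the PLL5 FOUTPOSTDIV rate for the requested rate:
 *   FOUTVCO = EXTAL * (intin + fracin / 2^24) / refdiv
 *   FOUTPOSTDIV = FOUTVCO / (postdiv1 * postdiv2)
 * intin/fracin encode the requested rate in MHz; the remaining PLL
 * parameters are fixed here.
 */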
static unsigned long
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
			       unsigned long rate)
{
	unsigned long foutpostdiv_rate, foutvco_rate;

	params->pl5_intin = rate / MEGA;
	params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
	params->pl5_refdiv = 2;
	params->pl5_postdiv1 = 1;
	params->pl5_postdiv2 = 1;
	params->pl5_spread = 0x16;

	foutvco_rate = div_u64(mul_u32_u32(EXTAL_FREQ_IN_MEGA_HZ * MEGA,
					   (params->pl5_intin << 24) + params->pl5_fracin),
			       params->pl5_refdiv) >> 24;
	foutpostdiv_rate = DIV_ROUND_CLOSEST_ULL(foutvco_rate,
						 params->pl5_postdiv1 * params->pl5_postdiv2);

	return foutpostdiv_rate;
}

struct dsi_div_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_dsi_div_hw_data(_hw)	container_of(_hw, struct dsi_div_hw_data, hw)

static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	unsigned long rate = dsi_div->rate;

	if (!rate)
		rate = parent_rate;

	return rate;
}

static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
						    unsigned long rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;
	struct rzg2l_pll5_param params;
	unsigned long parent_rate;

	parent_rate = rzg2l_cpg_get_foutpostdiv_rate(&params, rate);

	if (priv->mux_dsi_div_params.clksrc)
		parent_rate /= 2;

	return parent_rate;
}

static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
					    struct clk_rate_request *req)
{
	if (req->rate > MAX_VCLK_FREQ)
		req->rate = MAX_VCLK_FREQ;

	req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);

	return 0;
}

static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
				      unsigned long rate,
				      unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;

	/*
	 * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
	 *
	 * Based on the dot clock, the DSI divider clock sets the divider
	 * value, calculates the PLL parameters for generating FOUTPOSTDIV
	 * and the clock source for the MUX, and propagates that info to
	 * the parents.
	 */

	if (!rate || rate > MAX_VCLK_FREQ)
		return -EINVAL;

	dsi_div->rate = rate;
	writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
	       (priv->mux_dsi_div_params.dsi_div_a << 0) |
	       (priv->mux_dsi_div_params.dsi_div_b << 8),
	       priv->base + CPG_PL5_SDIV);

	return 0;
}

static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
	.recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
	.determine_rate = rzg2l_cpg_dsi_div_determine_rate,
	.set_rate = rzg2l_cpg_dsi_div_set_rate,
};

static struct clk * __init
rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
			       struct rzg2l_cpg_priv *priv)
{
	struct dsi_div_hw_data *clk_hw_data;
	const struct clk *parent;
	const char *parent_name;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = &rzg2l_cpg_dsi_div_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

struct pll5_mux_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll5_mux_hw_data(_hw)	container_of(_hw, struct pll5_mux_hw_data, hw)

static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
						   struct clk_rate_request *req)
{
	struct clk_hw *parent;
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
	req->best_parent_hw = parent;
	req->best_parent_rate = req->rate;

	return 0;
}

static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	/*
	 * FOUTPOSTDIV--->|
	 *  |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *  |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and clk source for the MUX. It propagates that info to
	 * pll5_4_clk_mux which sets the clock source for DSI divider clock.
	 */

	writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
	       priv->base + CPG_OTHERFUNC1_REG);

	return 0;
}

static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
}

static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
	.set_parent	= rzg2l_cpg_pll5_4_clk_mux_set_parent,
	.get_parent	= rzg2l_cpg_pll5_4_clk_mux_get_parent,
};

static struct clk * __init
rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
				  struct rzg2l_cpg_priv *priv)
{
	struct pll5_mux_hw_data *clk_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;
	clk_hw_data->conf = core->conf;

	init.name = core->name;
	init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

struct sipll5 {
	struct clk_hw hw;
	u32 conf;
	unsigned long foutpostdiv_rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_sipll5(_hw)	container_of(_hw, struct sipll5, hw)

static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
					     unsigned long rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	unsigned long vclk;

	vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
		       (priv->mux_dsi_div_params.dsi_div_b + 1));

	if (priv->mux_dsi_div_params.clksrc)
		vclk /= 2;

	return vclk;
}

static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	unsigned long pll5_rate = sipll5->foutpostdiv_rate;

	if (!pll5_rate)
		pll5_rate = parent_rate;

	return pll5_rate;
}

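/* Accept any rate; the actual PLL parameters are programmed in .set_rate(). */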
static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
					unsigned long rate,
					unsigned long *parent_rate)
{
	return rate;
}

static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
				     unsigned long rate,
				     unsigned long parent_rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	struct rzg2l_pll5_param params;
	unsigned long vclk_rate;
	int ret;
	u32 val;

	/*
	 *  OSC --> PLL5 --> FOUTPOSTDIV-->|
	 *                   |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *                   |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
	 * that info to sipll5 which sets parameters for generating FOUTPOSTDIV.
	 *
	 * OSC --> PLL5 --> FOUTPOSTDIV
	 */

	if (!rate)
		return -EINVAL;

	vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
	sipll5->foutpostdiv_rate =
		rzg2l_cpg_get_foutpostdiv_rate(&params, vclk_rate);

	/* Put PLL5 into standby mode */
	writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to release pll5 lock\n");
		return ret;
	}

	/* Output clock setting 1 */
	writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
	       (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);

	/* Output clock setting, SSCG modulation value setting 3 */
	writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);

	/* Output clock setting 4 */
	writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
	       priv->base + CPG_SIPLL5_CLK4);

	/* Output clock setting 5 */
	writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);

	/* PLL normal mode setting */
	writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
	       CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
	       priv->base + CPG_SIPLL5_STBY);

	/* PLL normal mode transition, output clock stability check */
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to lock pll5\n");
		return ret;
	}

	return 0;
}

static const struct clk_ops rzg2l_cpg_sipll5_ops = {
	.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
	.round_rate = rzg2l_cpg_sipll5_round_rate,
	.set_rate = rzg2l_cpg_sipll5_set_rate,
};

static struct clk * __init
rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
			  struct rzg2l_cpg_priv *priv)
{
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct sipll5 *sipll5;
	struct clk_hw *clk_hw;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
	if (!sipll5)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	parent_name = __clk_get_name(parent);
	init.ops = &rzg2l_cpg_sipll5_ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	sipll5->hw.init = &init;
	sipll5->conf = core->conf;
	sipll5->priv = priv;

	writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
	       CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);

	clk_hw = &sipll5->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
	priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
	priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */

	return clk_hw->clk;
}

struct pll_clk {
	struct clk_hw hw;
	unsigned int conf;
	unsigned int type;
	void __iomem *base;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)

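/*
 * SAM PLL output: Fout = Fin * (MDIV + KDIV / 2^16) / (PDIV * 2^SDIV),
 * where KDIV is a signed 16-bit fractional correction.
 */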
static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	unsigned int val1, val2;
	u64 rate;

	if (pll_clk->type != CLK_TYPE_SAM_PLL)
		return parent_rate;

	val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
	val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));

	rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
			       16 + SDIV(val2));

	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
}

static const struct clk_ops rzg2l_cpg_pll_ops = {
	.recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
};

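/*
 * RZ/G3S PLL output: Fout = Fin * (NI + NF / 4096) / (M * P), where NI
 * and M are the register fields plus one and P is a power of two (the
 * hardware treats values above 8 as 16).
 */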
static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	u32 nir, nfr, mr, pr, val;
	u64 rate;

	if (pll_clk->type != CLK_TYPE_G3S_PLL)
		return parent_rate;

	val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));

	pr = 1 << FIELD_GET(RZG3S_DIV_P, val);
	/* Hardware interprets values higher than 8 as p = 16. */
	if (pr > 8)
		pr = 16;

	mr  = FIELD_GET(RZG3S_DIV_M, val) + 1;
	nir = FIELD_GET(RZG3S_DIV_NI, val) + 1;
	nfr = FIELD_GET(RZG3S_DIV_NF, val);

	rate = mul_u64_u32_shr(parent_rate, 4096 * nir + nfr, 12);

	return DIV_ROUND_CLOSEST_ULL(rate, (mr * pr));
}

static const struct clk_ops rzg3s_cpg_pll_ops = {
	.recalc_rate = rzg3s_cpg_pll_clk_recalc_rate,
};

static struct clk * __init
rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv,
			   const struct clk_ops *ops)
{
	struct device *dev = priv->dev;
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct pll_clk *pll_clk;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->conf = core->conf;
	pll_clk->base = priv->base;
	pll_clk->priv = priv;
	pll_clk->type = core->type;

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret)
		return ERR_PTR(ret);

	return pll_clk->hw.clk;
}

static struct clk
*rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
			       void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct rzg2l_cpg_priv *priv = data;
	struct device *dev = priv->dev;
	const char *type;
	struct clk *clk;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + clkidx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld\n", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}

static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
			    const struct rzg2l_cpg_info *info,
			    struct rzg2l_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;
	struct clk_hw *clk_hw;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!core->name) {
		/* Skip NULLified clock */
		return;
	}

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name, parent_name,
							   CLK_SET_RATE_PARENT,
							   core->mult, div);
		if (IS_ERR(clk_hw))
			clk = ERR_CAST(clk_hw);
		else
			clk = clk_hw->clk;
		break;
	case CLK_TYPE_SAM_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg2l_cpg_pll_ops);
		break;
	case CLK_TYPE_G3S_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg3s_cpg_pll_ops);
		break;
	case CLK_TYPE_SIPLL5:
		clk = rzg2l_cpg_sipll5_register(core, priv);
		break;
	case CLK_TYPE_DIV:
		clk = rzg2l_cpg_div_clk_register(core, priv);
		break;
	case CLK_TYPE_G3S_DIV:
		clk = rzg3s_cpg_div_clk_register(core, priv);
		break;
	case CLK_TYPE_MUX:
		clk = rzg2l_cpg_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_SD_MUX:
		clk = rzg2l_cpg_sd_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_PLL5_4_MUX:
		clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_DSI_DIV:
		clk = rzg2l_cpg_dsi_div_clk_register(core, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}

/**
 * struct mstp_clock - MSTP gating clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @off: register offset
 * @bit: ON/MON bit
 * @enabled: soft state of the clock, if it is coupled with another clock
 * @priv: CPG/MSTP private data
 * @sibling: pointer to the other coupled clock
 */
struct mstp_clock {
	struct clk_hw hw;
	u16 off;
	u8 bit;
	bool enabled;
	struct rzg2l_cpg_priv *priv;
	struct mstp_clock *sibling;
};

#define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)

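/*
 * Module clocks are gated through the CLK_ON registers (write-enable
 * mask in the upper 16 bits, value in the lower 16 bits). Where the SoC
 * provides CLK_MON registers, the hardware state is polled there.
 */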
static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	unsigned int reg = clock->off;
	struct device *dev = priv->dev;
	u32 bitmask = BIT(clock->bit);
	u32 value;
	int error;

	if (!clock->off) {
		dev_dbg(dev, "%pC does not support ON/OFF\n", hw->clk);
		return 0;
	}

	dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(reg), hw->clk,
		enable ? "ON" : "OFF");

	value = bitmask << 16;
	if (enable)
		value |= bitmask;

	writel(value, priv->base + CLK_ON_R(reg));

	if (!enable)
		return 0;

	if (!priv->info->has_clk_mon_regs)
		return 0;

	error = readl_poll_timeout_atomic(priv->base + CLK_MON_R(reg), value,
					  value & bitmask, 0, 10);
	if (error)
		dev_err(dev, "Failed to enable CLK_ON %p\n",
			priv->base + CLK_ON_R(reg));

	return error;
}

static int rzg2l_mod_clock_enable(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;
		unsigned long flags;
		bool enabled;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = true;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
		if (enabled)
			return 0;
	}

	return rzg2l_mod_clock_endisable(hw, true);
}

static void rzg2l_mod_clock_disable(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;
		unsigned long flags;
		bool enabled;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = false;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
		if (enabled)
			return;
	}

	rzg2l_mod_clock_endisable(hw, false);
}

static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	u32 bitmask = BIT(clock->bit);
	u32 value;

	if (!clock->off) {
		dev_dbg(priv->dev, "%pC does not support ON/OFF\n", hw->clk);
		return 1;
	}

	if (clock->sibling)
		return clock->enabled;

	if (priv->info->has_clk_mon_regs)
		value = readl(priv->base + CLK_MON_R(clock->off));
	else
		value = readl(priv->base + clock->off);

	return value & bitmask;
}

static const struct clk_ops rzg2l_mod_clock_ops = {
	.enable = rzg2l_mod_clock_enable,
	.disable = rzg2l_mod_clock_disable,
	.is_enabled = rzg2l_mod_clock_is_enabled,
};

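/*
 * Coupled clocks share a single ON/MON bit: find the other module clock
 * registered with the same register offset and bit.
 */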
static struct mstp_clock
*rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
			     struct rzg2l_cpg_priv *priv)
{
	struct clk_hw *hw;
	unsigned int i;

	for (i = 0; i < priv->num_mod_clks; i++) {
		struct mstp_clock *clk;

		if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
			continue;

		hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
		clk = to_mod_clock(hw);
		if (clock->off == clk->off && clock->bit == clk->bit)
			return clk;
	}

	return NULL;
}

static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
			   const struct rzg2l_cpg_info *info,
			   struct rzg2l_cpg_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;
	int ret;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzg2l_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->off = mod->off;
	clock->bit = mod->bit;
	clock->priv = priv;
	clock->hw.init = &init;

	ret = devm_clk_hw_register(dev, &clock->hw);
	if (ret) {
		clk = ERR_PTR(ret);
		goto fail;
	}

	clk = clock->hw.clk;
	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;

	if (mod->is_coupled) {
		struct mstp_clock *sibling;

		clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
		sibling = rzg2l_mod_clock_get_sibling(clock, priv);
		if (sibling) {
			clock->sibling = sibling;
			sibling->sibling = clock;
		}
	}

	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
}

#define rcdev_to_priv(x)	container_of(x, struct rzg2l_cpg_priv, rcdev)

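/*
 * Reset registers use the same write-enable scheme as the clock ON
 * registers. Completion is polled either on the per-register monitor
 * (CLK_MRST) or on the shared CPG_RST_MON register; SoCs without reset
 * monitors simply wait for at least one RCLK cycle.
 */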
static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 mask = BIT(info->resets[id].bit);
	s8 monbit = info->resets[id].monbit;
	u32 value = mask << 16;

	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(reg);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		mask = BIT(monbit);
	} else {
		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
		udelay(35);
		return 0;
	}

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 value & mask, 10, 200);
}

static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 mask = BIT(info->resets[id].bit);
	s8 monbit = info->resets[id].monbit;
	u32 value = (mask << 16) | mask;

	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
		CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(reg);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		mask = BIT(monbit);
	} else {
		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
		udelay(35);
		return 0;
	}

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 !(value & mask), 10, 200);
}

static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	int ret;

	ret = rzg2l_cpg_assert(rcdev, id);
	if (ret)
		return ret;

	return rzg2l_cpg_deassert(rcdev, id);
}

static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	s8 monbit = info->resets[id].monbit;
	unsigned int reg;
	u32 bitmask;

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(info->resets[id].off);
		bitmask = BIT(info->resets[id].bit);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		bitmask = BIT(monbit);
	} else {
		return -ENOTSUPP;
	}

	return !!(readl(priv->base + reg) & bitmask);
}

static const struct reset_control_ops rzg2l_cpg_reset_ops = {
	.reset = rzg2l_cpg_reset,
	.assert = rzg2l_cpg_assert,
	.deassert = rzg2l_cpg_deassert,
	.status = rzg2l_cpg_status,
};

static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int id = reset_spec->args[0];

	if (id >= rcdev->nr_resets || !info->resets[id].off) {
		dev_err(rcdev->dev, "Invalid reset index %u\n", id);
		return -EINVAL;
	}

	return id;
}

static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
{
	priv->rcdev.ops = &rzg2l_cpg_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.dev = priv->dev;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
	priv->rcdev.nr_resets = priv->num_resets;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
				const struct of_phandle_args *clkspec)
{
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int id;
	unsigned int i;

	if (clkspec->args_count != 2)
		return false;

	if (clkspec->args[0] != CPG_MOD)
		return false;

	id = clkspec->args[1] + info->num_total_core_clks;
	for (i = 0; i < info->num_no_pm_mod_clks; i++) {
		if (info->no_pm_mod_clks[i] == id)
			return false;
	}

	return true;
}

/**
 * struct rzg2l_cpg_pm_domains - RZ/G2L PM domains data structure
 * @onecell_data: cell data
 * @domains: generic PM domains
 */
struct rzg2l_cpg_pm_domains {
	struct genpd_onecell_data onecell_data;
	struct generic_pm_domain *domains[];
};

/**
 * struct rzg2l_cpg_pd - RZ/G2L power domain data structure
 * @genpd: generic PM domain
 * @priv: pointer to CPG private data structure
 * @conf: CPG PM domain configuration info
 * @id: RZ/G2L power domain ID
 */
struct rzg2l_cpg_pd {
	struct generic_pm_domain genpd;
	struct rzg2l_cpg_priv *priv;
	struct rzg2l_cpg_pm_domain_conf conf;
	u16 id;
};

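/*
 * Walk the consumer's "clocks" phandles and register every CPG module
 * clock that qualifies as a PM clock with the PM clock framework, so the
 * clocks get managed together with the device's runtime PM state.
 */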
static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct rzg2l_cpg_priv *priv = pd->priv;
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	int error;
	int i = 0;

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
			if (once) {
				once = false;
				error = pm_clk_create(dev);
				if (error) {
					of_node_put(clkspec.np);
					goto err;
				}
			}
			clk = of_clk_get_from_provider(&clkspec);
			of_node_put(clkspec.np);
			if (IS_ERR(clk)) {
				error = PTR_ERR(clk);
				goto fail_destroy;
			}

			error = pm_clk_add_clk(dev, clk);
			if (error) {
				dev_err(dev, "pm_clk_add_clk failed %d\n",
					error);
				goto fail_put;
			}
		} else {
			of_node_put(clkspec.np);
		}
		i++;
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}

static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static void rzg2l_cpg_genpd_remove(void *data)
{
	struct genpd_onecell_data *celldata = data;

	for (unsigned int i = 0; i < celldata->num_domains; i++)
		pm_genpd_remove(celldata->domains[i]);
}

static void rzg2l_cpg_genpd_remove_simple(void *data)
{
	pm_genpd_remove(data);
}

static int rzg2l_cpg_power_on(struct generic_pm_domain *domain)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
	struct rzg2l_cpg_priv *priv = pd->priv;

	/* Clear MSTOP. */
	if (mstop.mask)
		writel(mstop.mask << 16, priv->base + mstop.off);

	return 0;
}

static int rzg2l_cpg_power_off(struct generic_pm_domain *domain)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
	struct rzg2l_cpg_priv *priv = pd->priv;

	/* Set MSTOP. */
	if (mstop.mask)
		writel(mstop.mask | (mstop.mask << 16), priv->base + mstop.off);

	return 0;
}

static int __init rzg2l_cpg_pd_setup(struct rzg2l_cpg_pd *pd)
{
	bool always_on = !!(pd->genpd.flags & GENPD_FLAG_ALWAYS_ON);
	struct dev_power_governor *governor;
	int ret;

	if (always_on)
		governor = &pm_domain_always_on_gov;
	else
		governor = &simple_qos_governor;

	pd->genpd.flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
	pd->genpd.attach_dev = rzg2l_cpg_attach_dev;
	pd->genpd.detach_dev = rzg2l_cpg_detach_dev;
	pd->genpd.power_on = rzg2l_cpg_power_on;
	pd->genpd.power_off = rzg2l_cpg_power_off;

	ret = pm_genpd_init(&pd->genpd, governor, !always_on);
	if (ret)
		return ret;

	if (always_on)
		ret = rzg2l_cpg_power_on(&pd->genpd);

	return ret;
}

static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzg2l_cpg_pd *pd;
	int ret;

	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->genpd.name = np->name;
	pd->genpd.flags = GENPD_FLAG_ALWAYS_ON;
	pd->priv = priv;
	ret = rzg2l_cpg_pd_setup(pd);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove_simple, &pd->genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, &pd->genpd);
}

static struct generic_pm_domain *
rzg2l_cpg_pm_domain_xlate(const struct of_phandle_args *spec, void *data)
{
	struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
	struct genpd_onecell_data *genpd = data;

	if (spec->args_count != 1)
		return ERR_PTR(-EINVAL);

	for (unsigned int i = 0; i < genpd->num_domains; i++) {
		struct rzg2l_cpg_pd *pd = container_of(genpd->domains[i], struct rzg2l_cpg_pd,
						       genpd);

		if (pd->id == spec->args[0]) {
			domain = &pd->genpd;
			break;
		}
	}

	return domain;
}

static int __init rzg2l_cpg_add_pm_domains(struct rzg2l_cpg_priv *priv)
{
	const struct rzg2l_cpg_info *info = priv->info;
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzg2l_cpg_pm_domains *domains;
	struct generic_pm_domain *parent;
	u32 ncells;
	int ret;

	ret = of_property_read_u32(np, "#power-domain-cells", &ncells);
	if (ret)
		return ret;

	/* For backward compatibility. */
	if (!ncells)
		return rzg2l_cpg_add_clk_domain(priv);

	domains = devm_kzalloc(dev, struct_size(domains, domains, info->num_pm_domains),
			       GFP_KERNEL);
	if (!domains)
		return -ENOMEM;

	domains->onecell_data.domains = domains->domains;
	domains->onecell_data.num_domains = info->num_pm_domains;
	domains->onecell_data.xlate = rzg2l_cpg_pm_domain_xlate;

	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, &domains->onecell_data);
	if (ret)
		return ret;

	for (unsigned int i = 0; i < info->num_pm_domains; i++) {
		struct rzg2l_cpg_pd *pd;

		pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
		if (!pd)
			return -ENOMEM;

		pd->genpd.name = info->pm_domains[i].name;
		pd->genpd.flags = info->pm_domains[i].genpd_flags;
		pd->conf = info->pm_domains[i].conf;
		pd->id = info->pm_domains[i].id;
		pd->priv = priv;

		ret = rzg2l_cpg_pd_setup(pd);
		if (ret)
			return ret;

		domains->domains[i] = &pd->genpd;
		/* Parent should be on the very first entry of info->pm_domains[]. */
		if (!i) {
			parent = &pd->genpd;
			continue;
		}

		ret = pm_genpd_add_subdomain(parent, &pd->genpd);
		if (ret)
			return ret;
	}

	ret = of_genpd_add_provider_onecell(np, &domains->onecell_data);
	if (ret)
		return ret;

	return 0;
}

static int __init rzg2l_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzg2l_cpg_info *info;
	struct rzg2l_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->info = info;
	spin_lock_init(&priv->rmw_lock);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->num_resets = info->num_resets;
	priv->last_dt_core_clk = info->last_dt_core_clk;

	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);

	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzg2l_cpg_add_pm_domains(priv);
	if (error)
		return error;

	error = rzg2l_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}

static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
	{
		.compatible = "renesas,r9a07g043-cpg",
		.data = &r9a07g043_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G044
	{
		.compatible = "renesas,r9a07g044-cpg",
		.data = &r9a07g044_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G054
	{
		.compatible = "renesas,r9a07g054-cpg",
		.data = &r9a07g054_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A08G045
	{
		.compatible = "renesas,r9a08g045-cpg",
		.data = &r9a08g045_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G011
	{
		.compatible = "renesas,r9a09g011-cpg",
		.data = &r9a09g011_cpg_info,
	},
#endif
	{ /* sentinel */ }
};

static struct platform_driver rzg2l_cpg_driver = {
	.driver		= {
		.name	= "rzg2l-cpg",
		.of_match_table = rzg2l_cpg_match,
	},
};

static int __init rzg2l_cpg_init(void)
{
	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
}

subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");