xref: /linux/drivers/clk/renesas/renesas-cpg-mssr.c (revision 323bbfcf1ef8836d0d2ad9e2c1f1c684f0e3b5b3)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Renesas Clock Pulse Generator / Module Standby and Software Reset
4  *
5  * Copyright (C) 2015 Glider bvba
6  *
7  * Based on clk-mstp.c, clk-rcar-gen2.c, and clk-rcar-gen3.c
8  *
9  * Copyright (C) 2013 Ideas On Board SPRL
10  * Copyright (C) 2015 Renesas Electronics Corp.
11  */
12 
13 #include <linux/clk.h>
14 #include <linux/clk-provider.h>
15 #include <linux/clk/renesas.h>
16 #include <linux/delay.h>
17 #include <linux/device.h>
18 #include <linux/init.h>
19 #include <linux/io.h>
20 #include <linux/iopoll.h>
21 #include <linux/mod_devicetable.h>
22 #include <linux/module.h>
23 #include <linux/of_address.h>
24 #include <linux/platform_device.h>
25 #include <linux/pm_clock.h>
26 #include <linux/pm_domain.h>
27 #include <linux/psci.h>
28 #include <linux/reset-controller.h>
29 #include <linux/slab.h>
30 #include <linux/string_choices.h>
31 
32 #include <dt-bindings/clock/renesas-cpg-mssr.h>
33 
34 #include "renesas-cpg-mssr.h"
35 #include "clk-div6.h"
36 
/* Extra consistency checks (WARN_ON) are compiled in only for DEBUG builds */
#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

/* Number of dummy register reads required after a RZ/T2H reset deassert */
#define RZT2H_RESET_REG_READ_COUNT	7
44 
/*
 * Module Standby and Software Reset register offsets.
 *
 * If the registers exist, these are valid for SH-Mobile, R-Mobile,
 * R-Car Gen2, R-Car Gen3, and RZ/G1.
 * These are NOT valid for R-Car Gen1 and RZ/A1!
 */

/*
 * Module Stop Status Register offsets
 */

static const u16 mstpsr[] = {
	0x030, 0x038, 0x040, 0x048, 0x04C, 0x03C, 0x1C0, 0x1C4,
	0x9A0, 0x9A4, 0x9A8, 0x9AC,
};

/* Module Stop Status Register offsets (R-Car Gen4) */
static const u16 mstpsr_for_gen4[] = {
	0x2E00, 0x2E04, 0x2E08, 0x2E0C, 0x2E10, 0x2E14, 0x2E18, 0x2E1C,
	0x2E20, 0x2E24, 0x2E28, 0x2E2C, 0x2E30, 0x2E34, 0x2E38, 0x2E3C,
	0x2E40, 0x2E44, 0x2E48, 0x2E4C, 0x2E50, 0x2E54, 0x2E58, 0x2E5C,
	0x2E60, 0x2E64, 0x2E68, 0x2E6C, 0x2E70, 0x2E74,
};

/*
 * System Module Stop Control Register offsets
 */

static const u16 smstpcr[] = {
	0x130, 0x134, 0x138, 0x13C, 0x140, 0x144, 0x148, 0x14C,
	0x990, 0x994, 0x998, 0x99C,
};

/* Module Stop Control Register offsets (R-Car Gen4) */
static const u16 mstpcr_for_gen4[] = {
	0x2D00, 0x2D04, 0x2D08, 0x2D0C, 0x2D10, 0x2D14, 0x2D18, 0x2D1C,
	0x2D20, 0x2D24, 0x2D28, 0x2D2C, 0x2D30, 0x2D34, 0x2D38, 0x2D3C,
	0x2D40, 0x2D44, 0x2D48, 0x2D4C, 0x2D50, 0x2D54, 0x2D58, 0x2D5C,
	0x2D60, 0x2D64, 0x2D68, 0x2D6C, 0x2D70, 0x2D74,
};

/*
 * Module Stop Control Register (RZ/T2H)
 * RZ/T2H has 2 registers blocks,
 * Bit 12 is used to differentiate them
 */

#define RZT2H_MSTPCR_BLOCK_SHIFT	12
#define RZT2H_MSTPCR_OFFSET_MASK	GENMASK(11, 0)
/* Encode a (register block, register offset) pair into one u16 */
#define RZT2H_MSTPCR(block, offset)	(((block) << RZT2H_MSTPCR_BLOCK_SHIFT) | \
					((offset) & RZT2H_MSTPCR_OFFSET_MASK))

/* Decode the block index resp. register offset from an encoded value */
#define RZT2H_MSTPCR_BLOCK(x)		((x) >> RZT2H_MSTPCR_BLOCK_SHIFT)
#define RZT2H_MSTPCR_OFFSET(x)		((x) & RZT2H_MSTPCR_OFFSET_MASK)

/* Zero entries below mark registers that do not exist (gaps F and H) */
static const u16 mstpcr_for_rzt2h[] = {
	RZT2H_MSTPCR(0, 0x300), /* MSTPCRA */
	RZT2H_MSTPCR(0, 0x304), /* MSTPCRB */
	RZT2H_MSTPCR(0, 0x308), /* MSTPCRC */
	RZT2H_MSTPCR(0, 0x30c),	/* MSTPCRD */
	RZT2H_MSTPCR(0, 0x310), /* MSTPCRE */
	0,
	RZT2H_MSTPCR(1, 0x318), /* MSTPCRG */
	0,
	RZT2H_MSTPCR(1, 0x320), /* MSTPCRI */
	RZT2H_MSTPCR(0, 0x324), /* MSTPCRJ */
	RZT2H_MSTPCR(0, 0x328), /* MSTPCRK */
	RZT2H_MSTPCR(0, 0x32c), /* MSTPCRL */
	RZT2H_MSTPCR(0, 0x330), /* MSTPCRM */
	RZT2H_MSTPCR(1, 0x334), /* MSTPCRN */
};

/*
 * Standby Control Register offsets (RZ/A)
 * Base address is FRQCR register
 */

static const u16 stbcr[] = {
	0xFFFF/*dummy*/, 0x010, 0x014, 0x410, 0x414, 0x418, 0x41C, 0x420,
	0x424, 0x428, 0x42C,
};

/*
 * Software Reset Register offsets
 */

static const u16 srcr[] = {
	0x0A0, 0x0A8, 0x0B0, 0x0B8, 0x0BC, 0x0C4, 0x1C8, 0x1CC,
	0x920, 0x924, 0x928, 0x92C,
};

/* Software Reset Register offsets (R-Car Gen4) */
static const u16 srcr_for_gen4[] = {
	0x2C00, 0x2C04, 0x2C08, 0x2C0C, 0x2C10, 0x2C14, 0x2C18, 0x2C1C,
	0x2C20, 0x2C24, 0x2C28, 0x2C2C, 0x2C30, 0x2C34, 0x2C38, 0x2C3C,
	0x2C40, 0x2C44, 0x2C48, 0x2C4C, 0x2C50, 0x2C54, 0x2C58, 0x2C5C,
	0x2C60, 0x2C64, 0x2C68, 0x2C6C, 0x2C70, 0x2C74,
};

/* Module Reset Control Register offsets (RZ/T2H) */
static const u16 mrcr_for_rzt2h[] = {
	0x240,	/* MRCTLA */
	0x244,	/* Reserved */
	0x248,	/* Reserved */
	0x24C,	/* Reserved */
	0x250,	/* MRCTLE */
	0x254,	/* Reserved */
	0x258,	/* Reserved */
	0x25C,	/* Reserved */
	0x260,	/* MRCTLI */
	0x264,	/* Reserved */
	0x268,	/* Reserved */
	0x26C,	/* Reserved */
	0x270,	/* MRCTLM */
};

/*
 * Software Reset Clearing Register offsets
 */

static const u16 srstclr[] = {
	0x940, 0x944, 0x948, 0x94C, 0x950, 0x954, 0x958, 0x95C,
	0x960, 0x964, 0x968, 0x96C,
};

/* Software Reset Clearing Register offsets (R-Car Gen4) */
static const u16 srstclr_for_gen4[] = {
	0x2C80, 0x2C84, 0x2C88, 0x2C8C, 0x2C90, 0x2C94, 0x2C98, 0x2C9C,
	0x2CA0, 0x2CA4, 0x2CA8, 0x2CAC, 0x2CB0, 0x2CB4, 0x2CB8, 0x2CBC,
	0x2CC0, 0x2CC4, 0x2CC8, 0x2CCC, 0x2CD0, 0x2CD4, 0x2CD8, 0x2CDC,
	0x2CE0, 0x2CE4, 0x2CE8, 0x2CEC, 0x2CF0, 0x2CF4,
};
173 
/**
 * struct cpg_mssr_priv - Clock Pulse Generator / Module Standby
 *                        and Software Reset Private Data
 *
 * @pub: Data passed to clock registration callback
 * @rcdev: Optional reset controller entity
 * @dev: CPG/MSSR device
 * @reg_layout: CPG/MSSR register layout
 * @np: Device node in DT for this CPG/MSSR module
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @status_regs: Pointer to status registers array
 * @control_regs: Pointer to control registers array
 * @reset_regs: Pointer to reset registers array
 * @reset_clear_regs:  Pointer to reset clearing registers array
 * @smstpcr_saved: [].mask: Mask of SMSTPCR[] bits under our control
 *                 [].val: Saved values of SMSTPCR[]
 * @reserved_ids: Temporary used, reserved id list
 * @num_reserved_ids: Temporary used, number of reserved id list
 * @clks: Array containing all Core and Module Clocks
 */
struct cpg_mssr_priv {
	struct cpg_mssr_pub pub;
#ifdef CONFIG_RESET_CONTROLLER
	struct reset_controller_dev rcdev;
#endif
	struct device *dev;
	enum clk_reg_layout reg_layout;
	struct device_node *np;

	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int last_dt_core_clk;

	const u16 *status_regs;
	const u16 *control_regs;
	const u16 *reset_regs;
	const u16 *reset_clear_regs;
	/* Sized for the largest supported register set (R-Car Gen4) */
	struct {
		u32 mask;
		u32 val;
	} smstpcr_saved[ARRAY_SIZE(mstpsr_for_gen4)];

	unsigned int *reserved_ids;
	unsigned int num_reserved_ids;

	struct clk *clks[];
};

/* Singleton instance, set up once at probe time */
static struct cpg_mssr_priv *cpg_mssr_priv;
225 
/**
 * struct mstp_clock - MSTP gating clock
 * @hw: handle between common and hardware-specific interfaces
 * @index: MSTP clock number
 * @priv: CPG/MSSR private data
 */
struct mstp_clock {
	struct clk_hw hw;
	u32 index;
	struct cpg_mssr_priv *priv;
};

/* Map a clk_hw handle back to its containing mstp_clock */
#define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw)
239 
cpg_rzt2h_mstp_read(struct cpg_mssr_priv * priv,u16 offset)240 static u32 cpg_rzt2h_mstp_read(struct cpg_mssr_priv *priv, u16 offset)
241 {
242 	void __iomem *base =
243 		RZT2H_MSTPCR_BLOCK(offset) ? priv->pub.base1 : priv->pub.base0;
244 
245 	return readl(base + RZT2H_MSTPCR_OFFSET(offset));
246 }
247 
/*
 * Write an RZ/T2H MSTPCR register.  Bit 12 of @offset selects which of the
 * two register blocks (base0/base1) the register lives in.
 */
static void cpg_rzt2h_mstp_write(struct cpg_mssr_priv *priv, u16 offset, u32 value)
{
	void __iomem *base = priv->pub.base0;

	if (RZT2H_MSTPCR_BLOCK(offset))
		base = priv->pub.base1;

	writel(value, base + RZT2H_MSTPCR_OFFSET(offset));
}
255 
/*
 * Gate or ungate one module clock.
 *
 * Clearing the module's bit in the control register enables the clock;
 * setting it stops the module.  The register access width and the required
 * post-write handshake differ per register layout (RZ/A uses byte-wide
 * STBCR accesses, RZ/T2H needs a read-back plus settle delay, all others
 * poll the MSTP status register).
 */
static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct cpg_mssr_priv *priv = clock->priv;
	unsigned int reg = clock->index / 32;
	unsigned int bit = clock->index % 32;
	struct device *dev = priv->dev;
	u32 bitmask = BIT(bit);
	unsigned long flags;
	u32 value;
	int error;

	dev_dbg(dev, "MSTP %u%02u/%pC %s\n", reg, bit, hw->clk,
		str_on_off(enable));
	/* Serialize read-modify-write of the shared control register */
	spin_lock_irqsave(&priv->pub.rmw_lock, flags);

	if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
		value = readb(priv->pub.base0 + priv->control_regs[reg]);
		if (enable)
			value &= ~bitmask;	/* 0 = clock runs */
		else
			value |= bitmask;	/* 1 = module stopped */
		writeb(value, priv->pub.base0 + priv->control_regs[reg]);

		/* dummy read to ensure write has completed */
		readb(priv->pub.base0 + priv->control_regs[reg]);
		barrier_data(priv->pub.base0 + priv->control_regs[reg]);

	} else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
		value = cpg_rzt2h_mstp_read(priv, priv->control_regs[reg]);

		if (enable)
			value &= ~bitmask;
		else
			value |= bitmask;

		cpg_rzt2h_mstp_write(priv, priv->control_regs[reg], value);
	} else {
		value = readl(priv->pub.base0 + priv->control_regs[reg]);
		if (enable)
			value &= ~bitmask;
		else
			value |= bitmask;
		writel(value, priv->pub.base0 + priv->control_regs[reg]);
	}

	spin_unlock_irqrestore(&priv->pub.rmw_lock, flags);

	/* Only an enable on non-RZ/A layouts needs completion handling */
	if (!enable || priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
		return 0;

	if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
		/*
		 * For the RZ/T2H case, it is necessary to perform a read-back after
		 * accessing the MSTPCRm register and to dummy-read any register of
		 * the IP at least seven times. Instead of memory-mapping the IP
		 * register, we simply add a delay after the read operation.
		 */
		cpg_rzt2h_mstp_read(priv, priv->control_regs[reg]);
		udelay(10);
		return 0;
	}

	/* Wait for the status bit to confirm the clock is supplied again */
	error = readl_poll_timeout_atomic(priv->pub.base0 + priv->status_regs[reg],
					  value, !(value & bitmask), 0, 10);
	if (error)
		dev_err(dev, "Failed to enable SMSTP %p[%d]\n",
			priv->pub.base0 + priv->control_regs[reg], bit);

	return error;
}
327 
/* clk_ops .enable callback: ungate the module clock */
static int cpg_mstp_clock_enable(struct clk_hw *hw)
{
	return cpg_mstp_clock_endisable(hw, true);
}
332 
/* clk_ops .disable callback: gate the module clock */
static void cpg_mstp_clock_disable(struct clk_hw *hw)
{
	cpg_mstp_clock_endisable(hw, false);
}
337 
cpg_mstp_clock_is_enabled(struct clk_hw * hw)338 static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
339 {
340 	struct mstp_clock *clock = to_mstp_clock(hw);
341 	struct cpg_mssr_priv *priv = clock->priv;
342 	unsigned int reg = clock->index / 32;
343 	u32 value;
344 
345 	if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
346 		value = readb(priv->pub.base0 + priv->control_regs[reg]);
347 	else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H)
348 		value = cpg_rzt2h_mstp_read(priv, priv->control_regs[reg]);
349 	else
350 		value = readl(priv->pub.base0 + priv->status_regs[reg]);
351 
352 	return !(value & BIT(clock->index % 32));
353 }
354 
/* Common clock framework operations for MSTP gating clocks */
static const struct clk_ops cpg_mstp_clock_ops = {
	.enable = cpg_mstp_clock_enable,
	.disable = cpg_mstp_clock_disable,
	.is_enabled = cpg_mstp_clock_is_enabled,
};
360 
/*
 * Translate a two-cell DT clock specifier (type, index) into a struct clk.
 *
 * Cell 0 selects CPG_CORE or CPG_MOD; cell 1 is the clock index, which for
 * module clocks is packed (MOD_CLK_PACK/MOD_CLK_PACK_10) into a position in
 * clks[].  Returns an ERR_PTR for out-of-range or unknown specifiers.
 */
static
struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec,
					 void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct cpg_mssr_priv *priv = data;
	struct device *dev = priv->dev;
	unsigned int idx;
	const char *type;
	struct clk *clk;
	int range_check;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
			       clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
			/* RZ/A packs 10 module clocks per register */
			idx = MOD_CLK_PACK_10(clkidx);
			range_check = 7 - (clkidx % 10);
		} else {
			/* Other layouts pack 32 module clocks per register */
			idx = MOD_CLK_PACK(clkidx);
			range_check = 31 - (clkidx % 100);
		}
		if (range_check < 0 || idx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + idx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld\n", type, clkidx,
		       PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}
415 
/*
 * Register a single Core Clock described by @core and store it in
 * priv->pub.clks[core->id].
 *
 * Handles the generic clock types (external input, fixed factor, DIV6
 * variants, fixed rate) directly and defers any SoC-specific type to the
 * info->cpg_clk_register() callback.  Failures are logged, not propagated.
 */
static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
					      const struct cpg_mssr_info *info,
					      struct cpg_mssr_priv *priv)
{
	struct clk *clk = ERR_PTR(-ENOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	switch (core->type) {
	case CLK_TYPE_IN:
		/* External input: look up the named clock from DT */
		clk = of_clk_get_by_name(priv->np, core->name);
		break;

	case CLK_TYPE_FF:
	case CLK_TYPE_DIV6P1:
	case CLK_TYPE_DIV6_RO:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->pub.clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);

		if (core->type == CLK_TYPE_DIV6_RO)
			/* Multiply with the DIV6 register value */
			div *= (readl(priv->pub.base0 + core->offset) & 0x3f) + 1;

		if (core->type == CLK_TYPE_DIV6P1) {
			clk = cpg_div6_register(core->name, 1, &parent_name,
						priv->pub.base0 + core->offset,
						&priv->pub.notifiers);
		} else {
			clk = clk_register_fixed_factor(NULL, core->name,
							parent_name, 0,
							core->mult, div);
		}
		break;

	case CLK_TYPE_FR:
		clk = clk_register_fixed_rate(NULL, core->name, NULL, 0,
					      core->mult);
		break;

	default:
		/* SoC-specific clock types are handled by the per-SoC driver */
		if (info->cpg_clk_register)
			clk = info->cpg_clk_register(dev, core, info,
						     &priv->pub);
		else
			dev_err(dev, "%s has unsupported core clock type %u\n",
				core->name, core->type);
		break;
	}

	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->pub.clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}
486 
/*
 * Register a single Module (MSTP gate) Clock described by @mod and store it
 * in priv->clks[mod->id].
 *
 * Clocks listed in info->crit_mod_clks that are already running are marked
 * CLK_IS_CRITICAL; clocks reserved for another CPU/OS (priv->reserved_ids)
 * are marked CLK_IGNORE_UNUSED.  Failures are logged, not propagated.
 */
static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
					     const struct cpg_mssr_info *info,
					     struct cpg_mssr_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init = {};
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->pub.clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->pub.clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = kzalloc_obj(*clock);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &cpg_mstp_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	/* Module clock indices start after the core clocks in clks[] */
	clock->index = id - priv->num_core_clks;
	clock->priv = priv;
	clock->hw.init = &init;

	/* Keep critical clocks that are currently on from being disabled */
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i] &&
		    cpg_mstp_clock_is_enabled(&clock->hw)) {
			dev_dbg(dev, "MSTP %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	/*
	 * Ignore reserved device.
	 * see
	 *	cpg_mssr_reserved_init()
	 */
	for (i = 0; i < priv->num_reserved_ids; i++) {
		if (id == priv->reserved_ids[i]) {
			dev_info(dev, "Ignore Linux non-assigned mod (%s)\n", mod->name);
			init.flags |= CLK_IGNORE_UNUSED;
			break;
		}
	}

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	/* Record that this bit is under our control for suspend/resume */
	priv->smstpcr_saved[clock->index / 32].mask |= BIT(clock->index % 32);
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
	kfree(clock);
}
568 
/*
 * Clock PM domain wrapping the CPG/MSSR module clocks.
 * @genpd: generic PM domain
 * @num_core_pm_clks: number of entries in core_pm_clks[]
 * @core_pm_clks: core clock indices that are also suitable as PM clocks
 */
struct cpg_mssr_clk_domain {
	struct generic_pm_domain genpd;
	unsigned int num_core_pm_clks;
	unsigned int core_pm_clks[];
};

/* Singleton clock domain, set up by cpg_mssr_add_clk_domain() */
static struct cpg_mssr_clk_domain *cpg_mssr_clk_domain;
576 
cpg_mssr_is_pm_clk(const struct of_phandle_args * clkspec,struct cpg_mssr_clk_domain * pd)577 static bool cpg_mssr_is_pm_clk(const struct of_phandle_args *clkspec,
578 			       struct cpg_mssr_clk_domain *pd)
579 {
580 	unsigned int i;
581 
582 	if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
583 		return false;
584 
585 	switch (clkspec->args[0]) {
586 	case CPG_CORE:
587 		for (i = 0; i < pd->num_core_pm_clks; i++)
588 			if (clkspec->args[1] == pd->core_pm_clks[i])
589 				return true;
590 		return false;
591 
592 	case CPG_MOD:
593 		return true;
594 
595 	default:
596 		return false;
597 	}
598 }
599 
/*
 * genpd .attach_dev callback: scan the device's "clocks" property for the
 * first clock suitable for PM (see cpg_mssr_is_pm_clk()) and register it
 * with the PM clock framework, so it is gated/ungated on runtime PM
 * transitions.  Returns -EPROBE_DEFER if the clock domain isn't ready yet.
 */
int cpg_mssr_attach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	struct cpg_mssr_clk_domain *pd = cpg_mssr_clk_domain;
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	struct clk *clk;
	int i = 0;
	int error;

	if (!pd) {
		dev_dbg(dev, "CPG/MSSR clock domain not yet available\n");
		return -EPROBE_DEFER;
	}

	/* Walk all entries of the "clocks" property */
	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (cpg_mssr_is_pm_clk(&clkspec, pd))
			goto found;

		of_node_put(clkspec.np);
		i++;
	}

	/* No PM clock found: nothing to manage, not an error */
	return 0;

found:
	clk = of_clk_get_from_provider(&clkspec);
	of_node_put(clkspec.np);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	error = pm_clk_create(dev);
	if (error)
		goto fail_put;

	error = pm_clk_add_clk(dev, clk);
	if (error)
		goto fail_destroy;

	return 0;

fail_destroy:
	pm_clk_destroy(dev);
fail_put:
	clk_put(clk);
	return error;
}
648 
/*
 * genpd .detach_dev callback: tear down the device's PM clock list, but
 * only if cpg_mssr_attach_dev() actually registered a clock for it.
 */
void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (pm_clk_no_clocks(dev))
		return;

	pm_clk_destroy(dev);
}
654 
/* devm action: remove the generic PM domain on driver teardown */
static void cpg_mssr_genpd_remove(void *data)
{
	pm_genpd_remove(data);
}
659 
/*
 * Create and register the always-on CPG/MSSR clock PM domain and expose it
 * as a genpd provider for @dev's DT node.
 *
 * @core_pm_clks lists the core clock indices usable as PM clocks; the list
 * is copied into the domain's flexible array.  Returns 0 or a negative
 * error code.
 */
static int __init cpg_mssr_add_clk_domain(struct device *dev,
					  const unsigned int *core_pm_clks,
					  unsigned int num_core_pm_clks)
{
	struct device_node *np = dev->of_node;
	struct generic_pm_domain *genpd;
	struct cpg_mssr_clk_domain *pd;
	size_t pm_size = num_core_pm_clks * sizeof(core_pm_clks[0]);
	int ret;

	pd = devm_kzalloc(dev, sizeof(*pd) + pm_size, GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->num_core_pm_clks = num_core_pm_clks;
	memcpy(pd->core_pm_clks, core_pm_clks, pm_size);

	genpd = &pd->genpd;
	genpd->name = np->name;
	/* Domain is never powered off; clocks are managed per device */
	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
		       GENPD_FLAG_ACTIVE_WAKEUP;
	genpd->attach_dev = cpg_mssr_attach_dev;
	genpd->detach_dev = cpg_mssr_detach_dev;
	ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, cpg_mssr_genpd_remove, genpd);
	if (ret)
		return ret;

	cpg_mssr_clk_domain = pd;

	return of_genpd_add_provider_simple(np, genpd);
}
695 
696 #ifdef CONFIG_RESET_CONTROLLER
697 
698 #define rcdev_to_priv(x)	container_of(x, struct cpg_mssr_priv, rcdev)
699 
/*
 * Write a single bit to either the Software Reset Register (@set) or the
 * Software Reset Clearing Register (!@set) for reset line @id, followed by
 * a read-back to make sure the posted write has reached the hardware.
 * @func, if non-NULL, names the operation for the debug log.
 */
static int cpg_mssr_reset_operate(struct reset_controller_dev *rcdev,
				  const char *func, bool set, unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	const u16 off = set ? priv->reset_regs[reg] : priv->reset_clear_regs[reg];
	u32 bitmask = BIT(bit);

	if (func)
		dev_dbg(priv->dev, "%s %u%02u\n", func, reg, bit);

	writel(bitmask, priv->pub.base0 + off);
	/* dummy read to ensure the write has completed */
	readl(priv->pub.base0 + off);
	barrier_data(priv->pub.base0 + off);

	return 0;
}
718 
/*
 * reset_control_ops .reset callback: pulse the module reset by asserting,
 * waiting the SoC-specific minimum time, then deasserting.
 */
static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
			  unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);

	/* Reset module */
	cpg_mssr_reset_operate(rcdev, "reset", true, id);

	/*
	 * On R-Car Gen4, delay after SRCR has been written is 1ms.
	 * On older SoCs, delay after SRCR has been written is 35us
	 * (one cycle of the RCLK clock @ ca. 32 kHz).
	 */
	if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN4)
		usleep_range(1000, 2000);
	else
		usleep_range(35, 1000);

	/* Release module from reset state */
	return cpg_mssr_reset_operate(rcdev, NULL, false, id);
}
740 
/* reset_control_ops .assert callback: put the module into reset */
static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
	return cpg_mssr_reset_operate(rcdev, "assert", true, id);
}
745 
/* reset_control_ops .deassert callback: release the module from reset */
static int cpg_mssr_deassert(struct reset_controller_dev *rcdev,
			     unsigned long id)
{
	return cpg_mssr_reset_operate(rcdev, NULL, false, id);
}
751 
cpg_mssr_status(struct reset_controller_dev * rcdev,unsigned long id)752 static int cpg_mssr_status(struct reset_controller_dev *rcdev,
753 			   unsigned long id)
754 {
755 	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
756 	unsigned int reg = id / 32;
757 	unsigned int bit = id % 32;
758 	u32 bitmask = BIT(bit);
759 
760 	return !!(readl(priv->pub.base0 + priv->reset_regs[reg]) & bitmask);
761 }
762 
/*
 * Assert (@set) or deassert (!@set) a module reset via the RZ/T2H Module
 * Reset Control Registers using a locked read-modify-write, then verify
 * that the bit actually changed.  Returns 0 on success, -EIO if the
 * hardware did not take the new state.
 */
static int cpg_mrcr_set_reset_state(struct reset_controller_dev *rcdev,
				    unsigned long id, bool set)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);
	void __iomem *reg_addr;
	unsigned long flags;
	unsigned int i;
	u32 val;

	dev_dbg(priv->dev, "%s %u%02u\n", set ? "assert" : "deassert", reg, bit);

	spin_lock_irqsave(&priv->pub.rmw_lock, flags);

	reg_addr = priv->pub.base0 + priv->reset_regs[reg];
	/* Read current value and modify */
	val = readl(reg_addr);
	if (set)
		val |= bitmask;
	else
		val &= ~bitmask;
	writel(val, reg_addr);

	/*
	 * For secure processing after release from a module reset, one must
	 * perform multiple dummy reads of the same register.
	 */
	for (i = 0; !set && i < RZT2H_RESET_REG_READ_COUNT; i++)
		readl(reg_addr);

	/* Verify the operation */
	val = readl(reg_addr);

	spin_unlock_irqrestore(&priv->pub.rmw_lock, flags);

	/* Fail if the read-back bit does not match the requested state */
	if (set == !(bitmask & val)) {
		dev_err(priv->dev, "Reset register %u%02u operation failed\n", reg, bit);
		return -EIO;
	}

	return 0;
}
807 
cpg_mrcr_reset(struct reset_controller_dev * rcdev,unsigned long id)808 static int cpg_mrcr_reset(struct reset_controller_dev *rcdev, unsigned long id)
809 {
810 	int ret;
811 
812 	ret = cpg_mrcr_set_reset_state(rcdev, id, true);
813 	if (ret)
814 		return ret;
815 
816 	return cpg_mrcr_set_reset_state(rcdev, id, false);
817 }
818 
/* reset_control_ops .assert callback for RZ/T2H */
static int cpg_mrcr_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
	return cpg_mrcr_set_reset_state(rcdev, id, true);
}
823 
/* reset_control_ops .deassert callback for RZ/T2H */
static int cpg_mrcr_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
	return cpg_mrcr_set_reset_state(rcdev, id, false);
}
828 
/* Reset operations for SoCs with SRCR/SRSTCLR-style reset registers */
static const struct reset_control_ops cpg_mssr_reset_ops = {
	.reset = cpg_mssr_reset,
	.assert = cpg_mssr_assert,
	.deassert = cpg_mssr_deassert,
	.status = cpg_mssr_status,
};

/* Reset operations for RZ/T2H's MRCTL-style reset registers */
static const struct reset_control_ops cpg_mrcr_reset_ops = {
	.reset = cpg_mrcr_reset,
	.assert = cpg_mrcr_assert,
	.deassert = cpg_mrcr_deassert,
	.status = cpg_mssr_status,
};
842 
/*
 * Translate a one-cell DT reset specifier (same packed numbering as the
 * module clocks) into a flat reset index; rejects out-of-range values.
 */
static int cpg_mssr_reset_xlate(struct reset_controller_dev *rcdev,
				const struct of_phandle_args *reset_spec)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int unpacked = reset_spec->args[0];
	unsigned int idx = MOD_CLK_PACK(unpacked);

	/* Only 32 bits per register, so the last two digits must be <= 31 */
	if (unpacked % 100 > 31 || idx >= rcdev->nr_resets) {
		dev_err(priv->dev, "Invalid reset index %u\n", unpacked);
		return -EINVAL;
	}

	return idx;
}
857 
/*
 * Register the reset controller, selecting the ops and reset count that
 * match the SoC's register layout.
 */
static int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
{
	/*
	 * RZ/T2H (and family) has the Module Reset Control Registers
	 * which allows control resets of certain modules.
	 * The number of resets is not equal to the number of module clocks.
	 */
	if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
		priv->rcdev.ops = &cpg_mrcr_reset_ops;
		priv->rcdev.nr_resets = ARRAY_SIZE(mrcr_for_rzt2h) * 32;
	} else {
		priv->rcdev.ops = &cpg_mssr_reset_ops;
		priv->rcdev.nr_resets = priv->num_mod_clks;
	}

	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = cpg_mssr_reset_xlate;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}
879 
880 #else /* !CONFIG_RESET_CONTROLLER */
/* Stub: no reset controller support configured, report success */
static inline int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
{
	return 0;
}
885 #endif /* !CONFIG_RESET_CONTROLLER */
886 
/*
 * OF match table: one entry per supported SoC, each gated on its Kconfig
 * option.  Some SoCs share the clock description of a near-identical
 * sibling (see the inline comments).
 */
static const struct of_device_id cpg_mssr_match[] = {
#ifdef CONFIG_CLK_R7S9210
	{
		.compatible = "renesas,r7s9210-cpg-mssr",
		.data = &r7s9210_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7742
	{
		.compatible = "renesas,r8a7742-cpg-mssr",
		.data = &r8a7742_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7743
	{
		.compatible = "renesas,r8a7743-cpg-mssr",
		.data = &r8a7743_cpg_mssr_info,
	},
	/* RZ/G1N is (almost) identical to RZ/G1M w.r.t. clocks. */
	{
		.compatible = "renesas,r8a7744-cpg-mssr",
		.data = &r8a7743_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7745
	{
		.compatible = "renesas,r8a7745-cpg-mssr",
		.data = &r8a7745_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77470
	{
		.compatible = "renesas,r8a77470-cpg-mssr",
		.data = &r8a77470_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774A1
	{
		.compatible = "renesas,r8a774a1-cpg-mssr",
		.data = &r8a774a1_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774B1
	{
		.compatible = "renesas,r8a774b1-cpg-mssr",
		.data = &r8a774b1_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774C0
	{
		.compatible = "renesas,r8a774c0-cpg-mssr",
		.data = &r8a774c0_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774E1
	{
		.compatible = "renesas,r8a774e1-cpg-mssr",
		.data = &r8a774e1_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7790
	{
		.compatible = "renesas,r8a7790-cpg-mssr",
		.data = &r8a7790_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7791
	{
		.compatible = "renesas,r8a7791-cpg-mssr",
		.data = &r8a7791_cpg_mssr_info,
	},
	/* R-Car M2-N is (almost) identical to R-Car M2-W w.r.t. clocks. */
	{
		.compatible = "renesas,r8a7793-cpg-mssr",
		.data = &r8a7791_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7792
	{
		.compatible = "renesas,r8a7792-cpg-mssr",
		.data = &r8a7792_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7794
	{
		.compatible = "renesas,r8a7794-cpg-mssr",
		.data = &r8a7794_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7795
	{
		.compatible = "renesas,r8a7795-cpg-mssr",
		.data = &r8a7795_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77960
	{
		.compatible = "renesas,r8a7796-cpg-mssr",
		.data = &r8a7796_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77961
	{
		.compatible = "renesas,r8a77961-cpg-mssr",
		.data = &r8a7796_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77965
	{
		.compatible = "renesas,r8a77965-cpg-mssr",
		.data = &r8a77965_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77970
	{
		.compatible = "renesas,r8a77970-cpg-mssr",
		.data = &r8a77970_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77980
	{
		.compatible = "renesas,r8a77980-cpg-mssr",
		.data = &r8a77980_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77990
	{
		.compatible = "renesas,r8a77990-cpg-mssr",
		.data = &r8a77990_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77995
	{
		.compatible = "renesas,r8a77995-cpg-mssr",
		.data = &r8a77995_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A779A0
	{
		.compatible = "renesas,r8a779a0-cpg-mssr",
		.data = &r8a779a0_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A779F0
	{
		.compatible = "renesas,r8a779f0-cpg-mssr",
		.data = &r8a779f0_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A779G0
	{
		.compatible = "renesas,r8a779g0-cpg-mssr",
		.data = &r8a779g0_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A779H0
	{
		.compatible = "renesas,r8a779h0-cpg-mssr",
		.data = &r8a779h0_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G077
	{
		.compatible = "renesas,r9a09g077-cpg-mssr",
		.data = &r9a09g077_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G087
	{
		.compatible = "renesas,r9a09g087-cpg-mssr",
		.data = &r9a09g077_cpg_mssr_info,
	},
#endif
	{ /* sentinel */ }
};
1062 
/* devm action callback: unregister the OF clock provider added for @data. */
static void cpg_mssr_del_clk_provider(void *data)
{
	struct device_node *np = data;	/* registered with the device_node as payload */

	of_clk_del_provider(np);
}
1067 
1068 #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM_PSCI_FW)
cpg_mssr_suspend_noirq(struct device * dev)1069 static int cpg_mssr_suspend_noirq(struct device *dev)
1070 {
1071 	struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
1072 	unsigned int reg;
1073 
1074 	/* This is the best we can do to check for the presence of PSCI */
1075 	if (!psci_ops.cpu_suspend)
1076 		return 0;
1077 
1078 	/* Save module registers with bits under our control */
1079 	for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
1080 		u32 val;
1081 
1082 		if (!priv->smstpcr_saved[reg].mask)
1083 			continue;
1084 
1085 		if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
1086 			val = readb(priv->pub.base0 + priv->control_regs[reg]);
1087 		else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H)
1088 			val = cpg_rzt2h_mstp_read(priv, priv->control_regs[reg]);
1089 		else
1090 			val = readl(priv->pub.base0 + priv->control_regs[reg]);
1091 
1092 		priv->smstpcr_saved[reg].val = val;
1093 	}
1094 
1095 	/* Save core clocks */
1096 	raw_notifier_call_chain(&priv->pub.notifiers, PM_EVENT_SUSPEND, NULL);
1097 
1098 	return 0;
1099 }
1100 
/*
 * System resume, noirq phase: restore the module-stop control bits saved by
 * cpg_mssr_suspend_noirq(), then (where a status register exists) wait for
 * the re-enabled module clocks to actually report as enabled.
 */
static int cpg_mssr_resume_noirq(struct device *dev)
{
	struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
	unsigned int reg;
	u32 mask, oldval, newval;
	int error;

	/* This is the best we can do to check for the presence of PSCI */
	if (!psci_ops.cpu_suspend)
		return 0;

	/* Restore core clocks */
	raw_notifier_call_chain(&priv->pub.notifiers, PM_EVENT_RESUME, NULL);

	/* Restore module clocks */
	for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
		mask = priv->smstpcr_saved[reg].mask;
		if (!mask)	/* no bits in this register are under our control */
			continue;

		/* Read back current state using the layout-specific accessor */
		if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
			oldval = readb(priv->pub.base0 + priv->control_regs[reg]);
		else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H)
			oldval = cpg_rzt2h_mstp_read(priv, priv->control_regs[reg]);
		else
			oldval = readl(priv->pub.base0 + priv->control_regs[reg]);
		/* Merge the saved bits we own; leave all other bits untouched */
		newval = oldval & ~mask;
		newval |= priv->smstpcr_saved[reg].val & mask;
		if (newval == oldval)	/* nothing to rewrite */
			continue;

		if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
			writeb(newval, priv->pub.base0 + priv->control_regs[reg]);
			/* dummy read to ensure write has completed */
			readb(priv->pub.base0 + priv->control_regs[reg]);
			barrier_data(priv->pub.base0 + priv->control_regs[reg]);
			continue;
		} else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
			cpg_rzt2h_mstp_write(priv, priv->control_regs[reg], newval);
			/* See cpg_mstp_clock_endisable() on why this is necessary. */
			cpg_rzt2h_mstp_read(priv, priv->control_regs[reg]);
			udelay(10);
			continue;
		} else
			writel(newval, priv->pub.base0 + priv->control_regs[reg]);

		/* Wait until enabled clocks are really enabled */
		mask &= ~priv->smstpcr_saved[reg].val;	/* keep only the bits we cleared */
		if (!mask)
			continue;

		error = readl_poll_timeout_atomic(priv->pub.base0 + priv->status_regs[reg],
						oldval, !(oldval & mask), 0, 10);
		if (error)
			dev_warn(dev, "Failed to enable SMSTP%u[0x%x]\n", reg,
				 oldval & mask);
	}

	return 0;
}
1161 
/* Only noirq callbacks: module clocks must be back before device resume. */
static const struct dev_pm_ops cpg_mssr_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cpg_mssr_suspend_noirq,
				      cpg_mssr_resume_noirq)
};
#define DEV_PM_OPS	&cpg_mssr_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */
1170 
/* Free the list of reserved clock IDs built by cpg_mssr_reserved_init(). */
static void __init cpg_mssr_reserved_exit(struct cpg_mssr_priv *priv)
{
	kfree(priv->reserved_ids);
}
1175 
cpg_mssr_reserved_init(struct cpg_mssr_priv * priv,const struct cpg_mssr_info * info)1176 static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
1177 					 const struct cpg_mssr_info *info)
1178 {
1179 	struct device_node *soc __free(device_node) = of_find_node_by_path("/soc");
1180 	struct device_node *node;
1181 	uint32_t args[MAX_PHANDLE_ARGS];
1182 	unsigned int *ids = NULL;
1183 	unsigned int num = 0;
1184 
1185 	/*
1186 	 * Because clk_disable_unused() will disable all unused clocks, the device which is assigned
1187 	 * to a non-Linux system will be disabled when Linux is booted.
1188 	 *
1189 	 * To avoid such situation, renesas-cpg-mssr assumes the device which has
1190 	 * status = "reserved" is assigned to a non-Linux system, and adds CLK_IGNORE_UNUSED flag
1191 	 * to its CPG_MOD clocks.
1192 	 * see also
1193 	 *	cpg_mssr_register_mod_clk()
1194 	 *
1195 	 *	scif5: serial@e6f30000 {
1196 	 *		...
1197 	 * =>		clocks = <&cpg CPG_MOD 202>,
1198 	 *			 <&cpg CPG_CORE R8A7795_CLK_S3D1>,
1199 	 *			 <&scif_clk>;
1200 	 *			 ...
1201 	 *		 status = "reserved";
1202 	 *	};
1203 	 */
1204 	for_each_reserved_child_of_node(soc, node) {
1205 		struct of_phandle_iterator it;
1206 		int rc;
1207 
1208 		of_for_each_phandle(&it, rc, node, "clocks", "#clock-cells", -1) {
1209 			int idx;
1210 			unsigned int *new_ids;
1211 
1212 			if (it.node != priv->np)
1213 				continue;
1214 
1215 			if (of_phandle_iterator_args(&it, args, MAX_PHANDLE_ARGS) != 2)
1216 				continue;
1217 
1218 			if (args[0] != CPG_MOD)
1219 				continue;
1220 
1221 			new_ids = krealloc_array(ids, (num + 1), sizeof(*ids), GFP_KERNEL);
1222 			if (!new_ids) {
1223 				of_node_put(it.node);
1224 				kfree(ids);
1225 				return -ENOMEM;
1226 			}
1227 			ids = new_ids;
1228 
1229 			if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
1230 				idx = MOD_CLK_PACK_10(args[1]);	/* for DEF_MOD_STB() */
1231 			else
1232 				idx = MOD_CLK_PACK(args[1]);	/* for DEF_MOD() */
1233 
1234 			ids[num] = info->num_total_core_clks + idx;
1235 
1236 			num++;
1237 		}
1238 	}
1239 
1240 	priv->num_reserved_ids	= num;
1241 	priv->reserved_ids	= ids;
1242 
1243 	return 0;
1244 }
1245 
/*
 * Initialization shared by cpg_mssr_early_init() and cpg_mssr_probe():
 * allocate the private data, map the register block(s), select the register
 * offset tables for the SoC's layout, and register the OF clock provider.
 * On success the state is published through the cpg_mssr_priv singleton.
 *
 * @dev is NULL when called from the early-init path.
 */
static int __init cpg_mssr_common_init(struct device *dev,
				       struct device_node *np,
				       const struct cpg_mssr_info *info)
{
	struct cpg_mssr_priv *priv;
	unsigned int nclks, i;
	int error;

	/* Optional SoC-specific early hook */
	if (info->init) {
		error = info->init(dev);
		if (error)
			return error;
	}

	/* One clk slot per core clock plus per possible module clock */
	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	priv = kzalloc_flex(*priv, clks, nclks);
	if (!priv)
		return -ENOMEM;

	priv->pub.clks = priv->clks;
	priv->np = np;
	priv->dev = dev;
	spin_lock_init(&priv->pub.rmw_lock);

	priv->pub.base0 = of_iomap(np, 0);
	if (!priv->pub.base0) {
		error = -ENOMEM;
		goto out_err;
	}
	/* RZ/T2H is the only layout with a second register block ("reg" index 1) */
	if (info->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
		priv->pub.base1 = of_iomap(np, 1);
		if (!priv->pub.base1) {
			error = -ENOMEM;
			goto out_err;
		}
	}

	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->last_dt_core_clk = info->last_dt_core_clk;
	RAW_INIT_NOTIFIER_HEAD(&priv->pub.notifiers);
	priv->reg_layout = info->reg_layout;
	/* Pick the register offset tables matching the SoC family's layout */
	if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN2_AND_GEN3) {
		priv->status_regs = mstpsr;
		priv->control_regs = smstpcr;
		priv->reset_regs = srcr;
		priv->reset_clear_regs = srstclr;
	} else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
		/* RZ/A has control registers only: no status or reset registers */
		priv->control_regs = stbcr;
	} else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
		priv->control_regs = mstpcr_for_rzt2h;
		priv->reset_regs = mrcr_for_rzt2h;
	} else if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN4) {
		priv->status_regs = mstpsr_for_gen4;
		priv->control_regs = mstpcr_for_gen4;
		priv->reset_regs = srcr_for_gen4;
		priv->reset_clear_regs = srstclr_for_gen4;
	} else {
		error = -EINVAL;
		goto out_err;
	}

	/* Mark all clocks as not yet registered */
	for (i = 0; i < nclks; i++)
		priv->pub.clks[i] = ERR_PTR(-ENOENT);

	error = cpg_mssr_reserved_init(priv, info);
	if (error)
		goto out_err;

	error = of_clk_add_provider(np, cpg_mssr_clk_src_twocell_get, priv);
	if (error)
		goto reserve_err;

	cpg_mssr_priv = priv;

	return 0;

reserve_err:
	cpg_mssr_reserved_exit(priv);
out_err:
	if (priv->pub.base0)
		iounmap(priv->pub.base0);
	if (priv->pub.base1)
		iounmap(priv->pub.base1);
	kfree(priv);

	return error;
}
1334 
cpg_mssr_early_init(struct device_node * np,const struct cpg_mssr_info * info)1335 void __init cpg_mssr_early_init(struct device_node *np,
1336 				const struct cpg_mssr_info *info)
1337 {
1338 	int error;
1339 	int i;
1340 
1341 	error = cpg_mssr_common_init(NULL, np, info);
1342 	if (error)
1343 		return;
1344 
1345 	for (i = 0; i < info->num_early_core_clks; i++)
1346 		cpg_mssr_register_core_clk(&info->early_core_clks[i], info,
1347 					   cpg_mssr_priv);
1348 
1349 	for (i = 0; i < info->num_early_mod_clks; i++)
1350 		cpg_mssr_register_mod_clk(&info->early_mod_clks[i], info,
1351 					  cpg_mssr_priv);
1352 
1353 }
1354 
/*
 * Platform driver probe: reuse the state set up by cpg_mssr_early_init()
 * when it already ran (cpg_mssr_priv non-NULL), otherwise do the common
 * initialization now; then register all core and module clocks, the PM
 * clock domain, and — where supported — the reset controller.
 */
static int __init cpg_mssr_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct cpg_mssr_info *info;
	struct cpg_mssr_priv *priv;
	unsigned int i;
	int error;

	info = of_device_get_match_data(dev);

	/* Skip common init if cpg_mssr_early_init() already performed it */
	if (!cpg_mssr_priv) {
		error = cpg_mssr_common_init(dev, dev->of_node, info);
		if (error)
			return error;
	}

	priv = cpg_mssr_priv;
	priv->dev = dev;	/* early init ran with a NULL dev; attach it now */
	dev_set_drvdata(dev, priv);

	/* Registering an already-registered clock is a no-op for early clocks */
	for (i = 0; i < info->num_core_clks; i++)
		cpg_mssr_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		cpg_mssr_register_mod_clk(&info->mod_clks[i], info, priv);

	error = devm_add_action_or_reset(dev,
					 cpg_mssr_del_clk_provider,
					 np);
	if (error)
		goto reserve_exit;

	error = cpg_mssr_add_clk_domain(dev, info->core_pm_clks,
					info->num_core_pm_clks);
	if (error)
		goto reserve_exit;

	/* Reset Controller not supported for Standby Control SoCs */
	if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
		goto reserve_exit;

	error = cpg_mssr_reset_controller_register(priv);

reserve_exit:
	/* The reserved-ID list is only needed while registering clocks */
	cpg_mssr_reserved_exit(priv);

	return error;
}
1404 
/* Matched via cpg_mssr_match; bound through platform_driver_probe() below. */
static struct platform_driver cpg_mssr_driver = {
	.driver		= {
		.name	= "renesas-cpg-mssr",
		.of_match_table = cpg_mssr_match,
		.pm = DEV_PM_OPS,
	},
};
1412 
/* Register at subsys_initcall time so clocks are available to consumers early. */
static int __init cpg_mssr_init(void)
{
	return platform_driver_probe(&cpg_mssr_driver, cpg_mssr_probe);
}

subsys_initcall(cpg_mssr_init);
1419 
mssr_mod_nullify(struct mssr_mod_clk * mod_clks,unsigned int num_mod_clks,const unsigned int * clks,unsigned int n)1420 void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
1421 			     unsigned int num_mod_clks,
1422 			     const unsigned int *clks, unsigned int n)
1423 {
1424 	unsigned int i, j;
1425 
1426 	for (i = 0, j = 0; i < num_mod_clks && j < n; i++)
1427 		if (mod_clks[i].id == clks[j]) {
1428 			mod_clks[i].name = NULL;
1429 			j++;
1430 		}
1431 }
1432 
1433 MODULE_DESCRIPTION("Renesas CPG/MSSR Driver");
1434