1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Renesas Clock Pulse Generator / Module Standby and Software Reset
4 *
5 * Copyright (C) 2015 Glider bvba
6 *
7 * Based on clk-mstp.c, clk-rcar-gen2.c, and clk-rcar-gen3.c
8 *
9 * Copyright (C) 2013 Ideas On Board SPRL
10 * Copyright (C) 2015 Renesas Electronics Corp.
11 */
12
13 #include <linux/clk.h>
14 #include <linux/clk-provider.h>
15 #include <linux/clk/renesas.h>
16 #include <linux/delay.h>
17 #include <linux/device.h>
18 #include <linux/init.h>
19 #include <linux/io.h>
20 #include <linux/iopoll.h>
21 #include <linux/mod_devicetable.h>
22 #include <linux/module.h>
23 #include <linux/of_address.h>
24 #include <linux/platform_device.h>
25 #include <linux/pm_clock.h>
26 #include <linux/pm_domain.h>
27 #include <linux/psci.h>
28 #include <linux/reset-controller.h>
29 #include <linux/slab.h>
30 #include <linux/string_choices.h>
31
32 #include <dt-bindings/clock/renesas-cpg-mssr.h>
33
34 #include "renesas-cpg-mssr.h"
35 #include "clk-div6.h"
36
37 #ifdef DEBUG
38 #define WARN_DEBUG(x) WARN_ON(x)
39 #else
40 #define WARN_DEBUG(x) do { } while (0)
41 #endif
42
43 #define RZT2H_RESET_REG_READ_COUNT 7
44
45 /*
46 * Module Standby and Software Reset register offsets.
47 *
48 * If the registers exist, these are valid for SH-Mobile, R-Mobile,
49 * R-Car Gen2, R-Car Gen3, and RZ/G1.
50 * These are NOT valid for R-Car Gen1 and RZ/A1!
51 */
52
53 /*
54 * Module Stop Status Register offsets
55 */
56
57 static const u16 mstpsr[] = {
58 0x030, 0x038, 0x040, 0x048, 0x04C, 0x03C, 0x1C0, 0x1C4,
59 0x9A0, 0x9A4, 0x9A8, 0x9AC,
60 };
61
62 static const u16 mstpsr_for_gen4[] = {
63 0x2E00, 0x2E04, 0x2E08, 0x2E0C, 0x2E10, 0x2E14, 0x2E18, 0x2E1C,
64 0x2E20, 0x2E24, 0x2E28, 0x2E2C, 0x2E30, 0x2E34, 0x2E38, 0x2E3C,
65 0x2E40, 0x2E44, 0x2E48, 0x2E4C, 0x2E50, 0x2E54, 0x2E58, 0x2E5C,
66 0x2E60, 0x2E64, 0x2E68, 0x2E6C, 0x2E70, 0x2E74,
67 };
68
69 /*
70 * System Module Stop Control Register offsets
71 */
72
73 static const u16 smstpcr[] = {
74 0x130, 0x134, 0x138, 0x13C, 0x140, 0x144, 0x148, 0x14C,
75 0x990, 0x994, 0x998, 0x99C,
76 };
77
78 static const u16 mstpcr_for_gen4[] = {
79 0x2D00, 0x2D04, 0x2D08, 0x2D0C, 0x2D10, 0x2D14, 0x2D18, 0x2D1C,
80 0x2D20, 0x2D24, 0x2D28, 0x2D2C, 0x2D30, 0x2D34, 0x2D38, 0x2D3C,
81 0x2D40, 0x2D44, 0x2D48, 0x2D4C, 0x2D50, 0x2D54, 0x2D58, 0x2D5C,
82 0x2D60, 0x2D64, 0x2D68, 0x2D6C, 0x2D70, 0x2D74,
83 };
84
/*
 * Module Stop Control Register offsets (RZ/T2H)
 *
 * RZ/T2H has two register blocks; bit 12 of the stored offset is used to
 * differentiate between them.
 */
90
91 #define RZT2H_MSTPCR_BLOCK_SHIFT 12
92 #define RZT2H_MSTPCR_OFFSET_MASK GENMASK(11, 0)
93 #define RZT2H_MSTPCR(block, offset) (((block) << RZT2H_MSTPCR_BLOCK_SHIFT) | \
94 ((offset) & RZT2H_MSTPCR_OFFSET_MASK))
95
96 #define RZT2H_MSTPCR_BLOCK(x) ((x) >> RZT2H_MSTPCR_BLOCK_SHIFT)
97 #define RZT2H_MSTPCR_OFFSET(x) ((x) & RZT2H_MSTPCR_OFFSET_MASK)
98
99 static const u16 mstpcr_for_rzt2h[] = {
100 RZT2H_MSTPCR(0, 0x300), /* MSTPCRA */
101 RZT2H_MSTPCR(0, 0x304), /* MSTPCRB */
102 RZT2H_MSTPCR(0, 0x308), /* MSTPCRC */
103 RZT2H_MSTPCR(0, 0x30c), /* MSTPCRD */
104 RZT2H_MSTPCR(0, 0x310), /* MSTPCRE */
105 0,
106 RZT2H_MSTPCR(1, 0x318), /* MSTPCRG */
107 0,
108 RZT2H_MSTPCR(1, 0x320), /* MSTPCRI */
109 RZT2H_MSTPCR(0, 0x324), /* MSTPCRJ */
110 RZT2H_MSTPCR(0, 0x328), /* MSTPCRK */
111 RZT2H_MSTPCR(0, 0x32c), /* MSTPCRL */
112 RZT2H_MSTPCR(0, 0x330), /* MSTPCRM */
113 RZT2H_MSTPCR(1, 0x334), /* MSTPCRN */
114 };
115
116 /*
117 * Standby Control Register offsets (RZ/A)
118 * Base address is FRQCR register
119 */
120
121 static const u16 stbcr[] = {
122 0xFFFF/*dummy*/, 0x010, 0x014, 0x410, 0x414, 0x418, 0x41C, 0x420,
123 0x424, 0x428, 0x42C,
124 };
125
126 /*
127 * Software Reset Register offsets
128 */
129
130 static const u16 srcr[] = {
131 0x0A0, 0x0A8, 0x0B0, 0x0B8, 0x0BC, 0x0C4, 0x1C8, 0x1CC,
132 0x920, 0x924, 0x928, 0x92C,
133 };
134
135 static const u16 srcr_for_gen4[] = {
136 0x2C00, 0x2C04, 0x2C08, 0x2C0C, 0x2C10, 0x2C14, 0x2C18, 0x2C1C,
137 0x2C20, 0x2C24, 0x2C28, 0x2C2C, 0x2C30, 0x2C34, 0x2C38, 0x2C3C,
138 0x2C40, 0x2C44, 0x2C48, 0x2C4C, 0x2C50, 0x2C54, 0x2C58, 0x2C5C,
139 0x2C60, 0x2C64, 0x2C68, 0x2C6C, 0x2C70, 0x2C74,
140 };
141
142 static const u16 mrcr_for_rzt2h[] = {
143 0x240, /* MRCTLA */
144 0x244, /* Reserved */
145 0x248, /* Reserved */
146 0x24C, /* Reserved */
147 0x250, /* MRCTLE */
148 0x254, /* Reserved */
149 0x258, /* Reserved */
150 0x25C, /* Reserved */
151 0x260, /* MRCTLI */
152 0x264, /* Reserved */
153 0x268, /* Reserved */
154 0x26C, /* Reserved */
155 0x270, /* MRCTLM */
156 };
157
158 /*
159 * Software Reset Clearing Register offsets
160 */
161
162 static const u16 srstclr[] = {
163 0x940, 0x944, 0x948, 0x94C, 0x950, 0x954, 0x958, 0x95C,
164 0x960, 0x964, 0x968, 0x96C,
165 };
166
167 static const u16 srstclr_for_gen4[] = {
168 0x2C80, 0x2C84, 0x2C88, 0x2C8C, 0x2C90, 0x2C94, 0x2C98, 0x2C9C,
169 0x2CA0, 0x2CA4, 0x2CA8, 0x2CAC, 0x2CB0, 0x2CB4, 0x2CB8, 0x2CBC,
170 0x2CC0, 0x2CC4, 0x2CC8, 0x2CCC, 0x2CD0, 0x2CD4, 0x2CD8, 0x2CDC,
171 0x2CE0, 0x2CE4, 0x2CE8, 0x2CEC, 0x2CF0, 0x2CF4,
172 };
173
174 /**
175 * struct cpg_mssr_priv - Clock Pulse Generator / Module Standby
176 * and Software Reset Private Data
177 *
178 * @pub: Data passed to clock registration callback
179 * @rcdev: Optional reset controller entity
180 * @dev: CPG/MSSR device
181 * @reg_layout: CPG/MSSR register layout
182 * @np: Device node in DT for this CPG/MSSR module
183 * @num_core_clks: Number of Core Clocks in clks[]
184 * @num_mod_clks: Number of Module Clocks in clks[]
185 * @last_dt_core_clk: ID of the last Core Clock exported to DT
186 * @status_regs: Pointer to status registers array
187 * @control_regs: Pointer to control registers array
188 * @reset_regs: Pointer to reset registers array
189 * @reset_clear_regs: Pointer to reset clearing registers array
* @smstpcr_saved: [].mask: Mask of SMSTPCR[] bits under our control
*                 [].val: Saved values of SMSTPCR[]
* @reserved_ids: Temporarily used list of reserved module clock IDs
* @num_reserved_ids: Temporarily used number of reserved module clock IDs
194 * @clks: Array containing all Core and Module Clocks
195 */
196 struct cpg_mssr_priv {
197 struct cpg_mssr_pub pub;
198 #ifdef CONFIG_RESET_CONTROLLER
199 struct reset_controller_dev rcdev;
200 #endif
201 struct device *dev;
202 enum clk_reg_layout reg_layout;
203 struct device_node *np;
204
205 unsigned int num_core_clks;
206 unsigned int num_mod_clks;
207 unsigned int last_dt_core_clk;
208
209 const u16 *status_regs;
210 const u16 *control_regs;
211 const u16 *reset_regs;
212 const u16 *reset_clear_regs;
213 struct {
214 u32 mask;
215 u32 val;
216 } smstpcr_saved[ARRAY_SIZE(mstpsr_for_gen4)];
217
218 unsigned int *reserved_ids;
219 unsigned int num_reserved_ids;
220
221 struct clk *clks[];
222 };
223
224 static struct cpg_mssr_priv *cpg_mssr_priv;
225
226 /**
227 * struct mstp_clock - MSTP gating clock
228 * @hw: handle between common and hardware-specific interfaces
229 * @index: MSTP clock number
230 * @priv: CPG/MSSR private data
231 */
232 struct mstp_clock {
233 struct clk_hw hw;
234 u32 index;
235 struct cpg_mssr_priv *priv;
236 };
237
238 #define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw)
239
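/*
 * The RZ/T2H MSTPCR accessors below use the register block number encoded
 * in the offset (see RZT2H_MSTPCR()) to select between base0 and base1.
 */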
static u32 cpg_rzt2h_mstp_read(struct clk_hw *hw, u16 offset)
241 {
242 struct mstp_clock *clock = to_mstp_clock(hw);
243 struct cpg_mssr_priv *priv = clock->priv;
244 void __iomem *base =
245 RZT2H_MSTPCR_BLOCK(offset) ? priv->pub.base1 : priv->pub.base0;
246
247 return readl(base + RZT2H_MSTPCR_OFFSET(offset));
248 }
249
static void cpg_rzt2h_mstp_write(struct clk_hw *hw, u16 offset, u32 value)
251 {
252 struct mstp_clock *clock = to_mstp_clock(hw);
253 struct cpg_mssr_priv *priv = clock->priv;
254 void __iomem *base =
255 RZT2H_MSTPCR_BLOCK(offset) ? priv->pub.base1 : priv->pub.base0;
256
257 writel(value, base + RZT2H_MSTPCR_OFFSET(offset));
258 }
259
static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
261 {
262 struct mstp_clock *clock = to_mstp_clock(hw);
263 struct cpg_mssr_priv *priv = clock->priv;
264 unsigned int reg = clock->index / 32;
265 unsigned int bit = clock->index % 32;
266 struct device *dev = priv->dev;
267 u32 bitmask = BIT(bit);
268 unsigned long flags;
269 u32 value;
270 int error;
271
272 dev_dbg(dev, "MSTP %u%02u/%pC %s\n", reg, bit, hw->clk,
273 str_on_off(enable));
274 spin_lock_irqsave(&priv->pub.rmw_lock, flags);
275
276 if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
277 value = readb(priv->pub.base0 + priv->control_regs[reg]);
278 if (enable)
279 value &= ~bitmask;
280 else
281 value |= bitmask;
282 writeb(value, priv->pub.base0 + priv->control_regs[reg]);
283
284 /* dummy read to ensure write has completed */
285 readb(priv->pub.base0 + priv->control_regs[reg]);
286 barrier_data(priv->pub.base0 + priv->control_regs[reg]);
287
288 } else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
289 value = cpg_rzt2h_mstp_read(hw,
290 priv->control_regs[reg]);
291
292 if (enable)
293 value &= ~bitmask;
294 else
295 value |= bitmask;
296
297 cpg_rzt2h_mstp_write(hw,
298 priv->control_regs[reg],
299 value);
300 } else {
301 value = readl(priv->pub.base0 + priv->control_regs[reg]);
302 if (enable)
303 value &= ~bitmask;
304 else
305 value |= bitmask;
306 writel(value, priv->pub.base0 + priv->control_regs[reg]);
307 }
308
309 spin_unlock_irqrestore(&priv->pub.rmw_lock, flags);
310
311 if (!enable || priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
312 return 0;
313
314 if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
315 /*
316 * For the RZ/T2H case, it is necessary to perform a read-back after
317 * accessing the MSTPCRm register and to dummy-read any register of
318 * the IP at least seven times. Instead of memory-mapping the IP
319 * register, we simply add a delay after the read operation.
320 */
321 cpg_rzt2h_mstp_read(hw, priv->control_regs[reg]);
322 udelay(10);
323 return 0;
324 }
325
326 error = readl_poll_timeout_atomic(priv->pub.base0 + priv->status_regs[reg],
327 value, !(value & bitmask), 0, 10);
328 if (error)
329 dev_err(dev, "Failed to enable SMSTP %p[%d]\n",
330 priv->pub.base0 + priv->control_regs[reg], bit);
331
332 return error;
333 }
334
static int cpg_mstp_clock_enable(struct clk_hw *hw)
336 {
337 return cpg_mstp_clock_endisable(hw, true);
338 }
339
static void cpg_mstp_clock_disable(struct clk_hw *hw)
341 {
342 cpg_mstp_clock_endisable(hw, false);
343 }
344
static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
346 {
347 struct mstp_clock *clock = to_mstp_clock(hw);
348 struct cpg_mssr_priv *priv = clock->priv;
349 unsigned int reg = clock->index / 32;
350 u32 value;
351
352 if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
353 value = readb(priv->pub.base0 + priv->control_regs[reg]);
354 else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H)
355 value = cpg_rzt2h_mstp_read(hw,
356 priv->control_regs[reg]);
357 else
358 value = readl(priv->pub.base0 + priv->status_regs[reg]);
359
360 return !(value & BIT(clock->index % 32));
361 }
362
363 static const struct clk_ops cpg_mstp_clock_ops = {
364 .enable = cpg_mstp_clock_enable,
365 .disable = cpg_mstp_clock_disable,
366 .is_enabled = cpg_mstp_clock_is_enabled,
367 };
368
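/*
 * Translate a two-cell clock specifier from DT (type CPG_CORE or CPG_MOD
 * plus an index) into the corresponding struct clk in priv->clks[].
 */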
static
struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec,
					 void *data)
372 {
373 unsigned int clkidx = clkspec->args[1];
374 struct cpg_mssr_priv *priv = data;
375 struct device *dev = priv->dev;
376 unsigned int idx;
377 const char *type;
378 struct clk *clk;
379 int range_check;
380
381 switch (clkspec->args[0]) {
382 case CPG_CORE:
383 type = "core";
384 if (clkidx > priv->last_dt_core_clk) {
385 dev_err(dev, "Invalid %s clock index %u\n", type,
386 clkidx);
387 return ERR_PTR(-EINVAL);
388 }
389 clk = priv->clks[clkidx];
390 break;
391
392 case CPG_MOD:
393 type = "module";
394 if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
395 idx = MOD_CLK_PACK_10(clkidx);
396 range_check = 7 - (clkidx % 10);
397 } else {
398 idx = MOD_CLK_PACK(clkidx);
399 range_check = 31 - (clkidx % 100);
400 }
401 if (range_check < 0 || idx >= priv->num_mod_clks) {
402 dev_err(dev, "Invalid %s clock index %u\n", type,
403 clkidx);
404 return ERR_PTR(-EINVAL);
405 }
406 clk = priv->clks[priv->num_core_clks + idx];
407 break;
408
409 default:
410 dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
411 return ERR_PTR(-EINVAL);
412 }
413
414 if (IS_ERR(clk))
415 dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
416 PTR_ERR(clk));
417 else
418 dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
419 clkspec->args[0], clkspec->args[1], clk,
420 clk_get_rate(clk));
421 return clk;
422 }
423
static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
					      const struct cpg_mssr_info *info,
					      struct cpg_mssr_priv *priv)
427 {
428 struct clk *clk = ERR_PTR(-ENOTSUPP), *parent;
429 struct device *dev = priv->dev;
430 unsigned int id = core->id, div = core->div;
431 const char *parent_name;
432
433 WARN_DEBUG(id >= priv->num_core_clks);
434 WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);
435
436 switch (core->type) {
437 case CLK_TYPE_IN:
438 clk = of_clk_get_by_name(priv->np, core->name);
439 break;
440
441 case CLK_TYPE_FF:
442 case CLK_TYPE_DIV6P1:
443 case CLK_TYPE_DIV6_RO:
444 WARN_DEBUG(core->parent >= priv->num_core_clks);
445 parent = priv->pub.clks[core->parent];
446 if (IS_ERR(parent)) {
447 clk = parent;
448 goto fail;
449 }
450
451 parent_name = __clk_get_name(parent);
452
453 if (core->type == CLK_TYPE_DIV6_RO)
454 /* Multiply with the DIV6 register value */
455 div *= (readl(priv->pub.base0 + core->offset) & 0x3f) + 1;
456
457 if (core->type == CLK_TYPE_DIV6P1) {
458 clk = cpg_div6_register(core->name, 1, &parent_name,
459 priv->pub.base0 + core->offset,
460 &priv->pub.notifiers);
461 } else {
462 clk = clk_register_fixed_factor(NULL, core->name,
463 parent_name, 0,
464 core->mult, div);
465 }
466 break;
467
468 case CLK_TYPE_FR:
469 clk = clk_register_fixed_rate(NULL, core->name, NULL, 0,
470 core->mult);
471 break;
472
473 default:
474 if (info->cpg_clk_register)
475 clk = info->cpg_clk_register(dev, core, info,
476 &priv->pub);
477 else
478 dev_err(dev, "%s has unsupported core clock type %u\n",
479 core->name, core->type);
480 break;
481 }
482
483 if (IS_ERR(clk))
484 goto fail;
485
486 dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
487 priv->pub.clks[id] = clk;
488 return;
489
490 fail:
491 dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
492 core->name, PTR_ERR(clk));
493 }
494
static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
					     const struct cpg_mssr_info *info,
					     struct cpg_mssr_priv *priv)
498 {
499 struct mstp_clock *clock = NULL;
500 struct device *dev = priv->dev;
501 unsigned int id = mod->id;
502 struct clk_init_data init = {};
503 struct clk *parent, *clk;
504 const char *parent_name;
505 unsigned int i;
506
507 WARN_DEBUG(id < priv->num_core_clks);
508 WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
509 WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
510 WARN_DEBUG(PTR_ERR(priv->pub.clks[id]) != -ENOENT);
511
512 if (!mod->name) {
513 /* Skip NULLified clock */
514 return;
515 }
516
517 parent = priv->pub.clks[mod->parent];
518 if (IS_ERR(parent)) {
519 clk = parent;
520 goto fail;
521 }
522
523 clock = kzalloc(sizeof(*clock), GFP_KERNEL);
524 if (!clock) {
525 clk = ERR_PTR(-ENOMEM);
526 goto fail;
527 }
528
529 init.name = mod->name;
530 init.ops = &cpg_mstp_clock_ops;
531 init.flags = CLK_SET_RATE_PARENT;
532 parent_name = __clk_get_name(parent);
533 init.parent_names = &parent_name;
534 init.num_parents = 1;
535
536 clock->index = id - priv->num_core_clks;
537 clock->priv = priv;
538 clock->hw.init = &init;
539
540 for (i = 0; i < info->num_crit_mod_clks; i++)
541 if (id == info->crit_mod_clks[i] &&
542 cpg_mstp_clock_is_enabled(&clock->hw)) {
543 dev_dbg(dev, "MSTP %s setting CLK_IS_CRITICAL\n",
544 mod->name);
545 init.flags |= CLK_IS_CRITICAL;
546 break;
547 }
548
/*
 * Ignore reserved devices;
 * see cpg_mssr_reserved_init().
 */
554 for (i = 0; i < priv->num_reserved_ids; i++) {
555 if (id == priv->reserved_ids[i]) {
556 dev_info(dev, "Ignore Linux non-assigned mod (%s)\n", mod->name);
557 init.flags |= CLK_IGNORE_UNUSED;
558 break;
559 }
560 }
561
562 clk = clk_register(NULL, &clock->hw);
563 if (IS_ERR(clk))
564 goto fail;
565
566 dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
567 priv->clks[id] = clk;
568 priv->smstpcr_saved[clock->index / 32].mask |= BIT(clock->index % 32);
569 return;
570
571 fail:
572 dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
573 mod->name, PTR_ERR(clk));
574 kfree(clock);
575 }
576
577 struct cpg_mssr_clk_domain {
578 struct generic_pm_domain genpd;
579 unsigned int num_core_pm_clks;
580 unsigned int core_pm_clks[];
581 };
582
583 static struct cpg_mssr_clk_domain *cpg_mssr_clk_domain;
584
static bool cpg_mssr_is_pm_clk(const struct of_phandle_args *clkspec,
			       struct cpg_mssr_clk_domain *pd)
587 {
588 unsigned int i;
589
590 if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
591 return false;
592
593 switch (clkspec->args[0]) {
594 case CPG_CORE:
595 for (i = 0; i < pd->num_core_pm_clks; i++)
596 if (clkspec->args[1] == pd->core_pm_clks[i])
597 return true;
598 return false;
599
600 case CPG_MOD:
601 return true;
602
603 default:
604 return false;
605 }
606 }
607
int cpg_mssr_attach_dev(struct generic_pm_domain *unused, struct device *dev)
609 {
610 struct cpg_mssr_clk_domain *pd = cpg_mssr_clk_domain;
611 struct device_node *np = dev->of_node;
612 struct of_phandle_args clkspec;
613 struct clk *clk;
614 int i = 0;
615 int error;
616
617 if (!pd) {
618 dev_dbg(dev, "CPG/MSSR clock domain not yet available\n");
619 return -EPROBE_DEFER;
620 }
621
622 while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
623 &clkspec)) {
624 if (cpg_mssr_is_pm_clk(&clkspec, pd))
625 goto found;
626
627 of_node_put(clkspec.np);
628 i++;
629 }
630
631 return 0;
632
633 found:
634 clk = of_clk_get_from_provider(&clkspec);
635 of_node_put(clkspec.np);
636
637 if (IS_ERR(clk))
638 return PTR_ERR(clk);
639
640 error = pm_clk_create(dev);
641 if (error)
642 goto fail_put;
643
644 error = pm_clk_add_clk(dev, clk);
645 if (error)
646 goto fail_destroy;
647
648 return 0;
649
650 fail_destroy:
651 pm_clk_destroy(dev);
652 fail_put:
653 clk_put(clk);
654 return error;
655 }
656
void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev)
658 {
659 if (!pm_clk_no_clocks(dev))
660 pm_clk_destroy(dev);
661 }
662
static void cpg_mssr_genpd_remove(void *data)
664 {
665 pm_genpd_remove(data);
666 }
667
static int __init cpg_mssr_add_clk_domain(struct device *dev,
					  const unsigned int *core_pm_clks,
					  unsigned int num_core_pm_clks)
671 {
672 struct device_node *np = dev->of_node;
673 struct generic_pm_domain *genpd;
674 struct cpg_mssr_clk_domain *pd;
675 size_t pm_size = num_core_pm_clks * sizeof(core_pm_clks[0]);
676 int ret;
677
678 pd = devm_kzalloc(dev, sizeof(*pd) + pm_size, GFP_KERNEL);
679 if (!pd)
680 return -ENOMEM;
681
682 pd->num_core_pm_clks = num_core_pm_clks;
683 memcpy(pd->core_pm_clks, core_pm_clks, pm_size);
684
685 genpd = &pd->genpd;
686 genpd->name = np->name;
687 genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
688 GENPD_FLAG_ACTIVE_WAKEUP;
689 genpd->attach_dev = cpg_mssr_attach_dev;
690 genpd->detach_dev = cpg_mssr_detach_dev;
691 ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
692 if (ret)
693 return ret;
694
695 ret = devm_add_action_or_reset(dev, cpg_mssr_genpd_remove, genpd);
696 if (ret)
697 return ret;
698
699 cpg_mssr_clk_domain = pd;
700
701 return of_genpd_add_provider_simple(np, genpd);
702 }
703
704 #ifdef CONFIG_RESET_CONTROLLER
705
706 #define rcdev_to_priv(x) container_of(x, struct cpg_mssr_priv, rcdev)
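
/*
 * Write a single module bit to the reset (SRCR) or reset-clearing (SRSTCLR)
 * register, followed by a read-back to ensure the write has completed.
 */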
707
static int cpg_mssr_reset_operate(struct reset_controller_dev *rcdev,
				  const char *func, bool set, unsigned long id)
710 {
711 struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
712 unsigned int reg = id / 32;
713 unsigned int bit = id % 32;
714 const u16 off = set ? priv->reset_regs[reg] : priv->reset_clear_regs[reg];
715 u32 bitmask = BIT(bit);
716
717 if (func)
718 dev_dbg(priv->dev, "%s %u%02u\n", func, reg, bit);
719
720 writel(bitmask, priv->pub.base0 + off);
721 readl(priv->pub.base0 + off);
722 barrier_data(priv->pub.base0 + off);
723
724 return 0;
725 }
726
static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
			  unsigned long id)
729 {
730 struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
731
732 /* Reset module */
733 cpg_mssr_reset_operate(rcdev, "reset", true, id);
734
735 /*
736 * On R-Car Gen4, delay after SRCR has been written is 1ms.
737 * On older SoCs, delay after SRCR has been written is 35us
738 * (one cycle of the RCLK clock @ ca. 32 kHz).
739 */
740 if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN4)
741 usleep_range(1000, 2000);
742 else
743 usleep_range(35, 1000);
744
745 /* Release module from reset state */
746 return cpg_mssr_reset_operate(rcdev, NULL, false, id);
747 }
748
static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
750 {
751 return cpg_mssr_reset_operate(rcdev, "assert", true, id);
752 }
753
static int cpg_mssr_deassert(struct reset_controller_dev *rcdev,
			     unsigned long id)
756 {
757 return cpg_mssr_reset_operate(rcdev, "deassert", false, id);
758 }
759
static int cpg_mssr_status(struct reset_controller_dev *rcdev,
			   unsigned long id)
762 {
763 struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
764 unsigned int reg = id / 32;
765 unsigned int bit = id % 32;
766 u32 bitmask = BIT(bit);
767
768 return !!(readl(priv->pub.base0 + priv->reset_regs[reg]) & bitmask);
769 }
770
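/*
 * RZ/T2H module resets are controlled by read-modify-write of the MRCTL
 * registers; the written value is read back to verify the operation.
 */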
static int cpg_mrcr_set_reset_state(struct reset_controller_dev *rcdev,
				    unsigned long id, bool set)
773 {
774 struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
775 unsigned int reg = id / 32;
776 unsigned int bit = id % 32;
777 u32 bitmask = BIT(bit);
778 void __iomem *reg_addr;
779 unsigned long flags;
780 unsigned int i;
781 u32 val;
782
783 dev_dbg(priv->dev, "%s %u%02u\n", set ? "assert" : "deassert", reg, bit);
784
785 spin_lock_irqsave(&priv->pub.rmw_lock, flags);
786
787 reg_addr = priv->pub.base0 + priv->reset_regs[reg];
788 /* Read current value and modify */
789 val = readl(reg_addr);
790 if (set)
791 val |= bitmask;
792 else
793 val &= ~bitmask;
794 writel(val, reg_addr);
795
/*
 * To ensure proper processing after release from a module reset, one must
 * perform multiple dummy reads of the same register.
 */
800 for (i = 0; !set && i < RZT2H_RESET_REG_READ_COUNT; i++)
801 readl(reg_addr);
802
803 /* Verify the operation */
804 val = readl(reg_addr);
805 if (set == !(bitmask & val)) {
806 dev_err(priv->dev, "Reset register %u%02u operation failed\n", reg, bit);
807 spin_unlock_irqrestore(&priv->pub.rmw_lock, flags);
808 return -EIO;
809 }
810
811 spin_unlock_irqrestore(&priv->pub.rmw_lock, flags);
812
813 return 0;
814 }
815
static int cpg_mrcr_reset(struct reset_controller_dev *rcdev, unsigned long id)
817 {
818 int ret;
819
820 ret = cpg_mrcr_set_reset_state(rcdev, id, true);
821 if (ret)
822 return ret;
823
824 return cpg_mrcr_set_reset_state(rcdev, id, false);
825 }
826
static int cpg_mrcr_assert(struct reset_controller_dev *rcdev, unsigned long id)
828 {
829 return cpg_mrcr_set_reset_state(rcdev, id, true);
830 }
831
static int cpg_mrcr_deassert(struct reset_controller_dev *rcdev, unsigned long id)
833 {
834 return cpg_mrcr_set_reset_state(rcdev, id, false);
835 }
836
837 static const struct reset_control_ops cpg_mssr_reset_ops = {
838 .reset = cpg_mssr_reset,
839 .assert = cpg_mssr_assert,
840 .deassert = cpg_mssr_deassert,
841 .status = cpg_mssr_status,
842 };
843
844 static const struct reset_control_ops cpg_mrcr_reset_ops = {
845 .reset = cpg_mrcr_reset,
846 .assert = cpg_mrcr_assert,
847 .deassert = cpg_mrcr_deassert,
848 .status = cpg_mssr_status,
849 };
850
static int cpg_mssr_reset_xlate(struct reset_controller_dev *rcdev,
				const struct of_phandle_args *reset_spec)
853 {
854 struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
855 unsigned int unpacked = reset_spec->args[0];
856 unsigned int idx = MOD_CLK_PACK(unpacked);
857
858 if (unpacked % 100 > 31 || idx >= rcdev->nr_resets) {
859 dev_err(priv->dev, "Invalid reset index %u\n", unpacked);
860 return -EINVAL;
861 }
862
863 return idx;
864 }
865
static int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
867 {
/*
 * RZ/T2H (and family) has the Module Reset Control Registers,
 * which allow controlling the resets of certain modules.
 * The number of resets is not equal to the number of module clocks.
 */
873 if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
874 priv->rcdev.ops = &cpg_mrcr_reset_ops;
875 priv->rcdev.nr_resets = ARRAY_SIZE(mrcr_for_rzt2h) * 32;
876 } else {
877 priv->rcdev.ops = &cpg_mssr_reset_ops;
878 priv->rcdev.nr_resets = priv->num_mod_clks;
879 }
880
881 priv->rcdev.of_node = priv->dev->of_node;
882 priv->rcdev.of_reset_n_cells = 1;
883 priv->rcdev.of_xlate = cpg_mssr_reset_xlate;
884
885 return devm_reset_controller_register(priv->dev, &priv->rcdev);
886 }
887
888 #else /* !CONFIG_RESET_CONTROLLER */
static inline int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
890 {
891 return 0;
892 }
893 #endif /* !CONFIG_RESET_CONTROLLER */
894
895 static const struct of_device_id cpg_mssr_match[] = {
896 #ifdef CONFIG_CLK_R7S9210
897 {
898 .compatible = "renesas,r7s9210-cpg-mssr",
899 .data = &r7s9210_cpg_mssr_info,
900 },
901 #endif
902 #ifdef CONFIG_CLK_R8A7742
903 {
904 .compatible = "renesas,r8a7742-cpg-mssr",
905 .data = &r8a7742_cpg_mssr_info,
906 },
907 #endif
908 #ifdef CONFIG_CLK_R8A7743
909 {
910 .compatible = "renesas,r8a7743-cpg-mssr",
911 .data = &r8a7743_cpg_mssr_info,
912 },
913 /* RZ/G1N is (almost) identical to RZ/G1M w.r.t. clocks. */
914 {
915 .compatible = "renesas,r8a7744-cpg-mssr",
916 .data = &r8a7743_cpg_mssr_info,
917 },
918 #endif
919 #ifdef CONFIG_CLK_R8A7745
920 {
921 .compatible = "renesas,r8a7745-cpg-mssr",
922 .data = &r8a7745_cpg_mssr_info,
923 },
924 #endif
925 #ifdef CONFIG_CLK_R8A77470
926 {
927 .compatible = "renesas,r8a77470-cpg-mssr",
928 .data = &r8a77470_cpg_mssr_info,
929 },
930 #endif
931 #ifdef CONFIG_CLK_R8A774A1
932 {
933 .compatible = "renesas,r8a774a1-cpg-mssr",
934 .data = &r8a774a1_cpg_mssr_info,
935 },
936 #endif
937 #ifdef CONFIG_CLK_R8A774B1
938 {
939 .compatible = "renesas,r8a774b1-cpg-mssr",
940 .data = &r8a774b1_cpg_mssr_info,
941 },
942 #endif
943 #ifdef CONFIG_CLK_R8A774C0
944 {
945 .compatible = "renesas,r8a774c0-cpg-mssr",
946 .data = &r8a774c0_cpg_mssr_info,
947 },
948 #endif
949 #ifdef CONFIG_CLK_R8A774E1
950 {
951 .compatible = "renesas,r8a774e1-cpg-mssr",
952 .data = &r8a774e1_cpg_mssr_info,
953 },
954 #endif
955 #ifdef CONFIG_CLK_R8A7790
956 {
957 .compatible = "renesas,r8a7790-cpg-mssr",
958 .data = &r8a7790_cpg_mssr_info,
959 },
960 #endif
961 #ifdef CONFIG_CLK_R8A7791
962 {
963 .compatible = "renesas,r8a7791-cpg-mssr",
964 .data = &r8a7791_cpg_mssr_info,
965 },
966 /* R-Car M2-N is (almost) identical to R-Car M2-W w.r.t. clocks. */
967 {
968 .compatible = "renesas,r8a7793-cpg-mssr",
969 .data = &r8a7791_cpg_mssr_info,
970 },
971 #endif
972 #ifdef CONFIG_CLK_R8A7792
973 {
974 .compatible = "renesas,r8a7792-cpg-mssr",
975 .data = &r8a7792_cpg_mssr_info,
976 },
977 #endif
978 #ifdef CONFIG_CLK_R8A7794
979 {
980 .compatible = "renesas,r8a7794-cpg-mssr",
981 .data = &r8a7794_cpg_mssr_info,
982 },
983 #endif
984 #ifdef CONFIG_CLK_R8A7795
985 {
986 .compatible = "renesas,r8a7795-cpg-mssr",
987 .data = &r8a7795_cpg_mssr_info,
988 },
989 #endif
990 #ifdef CONFIG_CLK_R8A77960
991 {
992 .compatible = "renesas,r8a7796-cpg-mssr",
993 .data = &r8a7796_cpg_mssr_info,
994 },
995 #endif
996 #ifdef CONFIG_CLK_R8A77961
997 {
998 .compatible = "renesas,r8a77961-cpg-mssr",
999 .data = &r8a7796_cpg_mssr_info,
1000 },
1001 #endif
1002 #ifdef CONFIG_CLK_R8A77965
1003 {
1004 .compatible = "renesas,r8a77965-cpg-mssr",
1005 .data = &r8a77965_cpg_mssr_info,
1006 },
1007 #endif
1008 #ifdef CONFIG_CLK_R8A77970
1009 {
1010 .compatible = "renesas,r8a77970-cpg-mssr",
1011 .data = &r8a77970_cpg_mssr_info,
1012 },
1013 #endif
1014 #ifdef CONFIG_CLK_R8A77980
1015 {
1016 .compatible = "renesas,r8a77980-cpg-mssr",
1017 .data = &r8a77980_cpg_mssr_info,
1018 },
1019 #endif
1020 #ifdef CONFIG_CLK_R8A77990
1021 {
1022 .compatible = "renesas,r8a77990-cpg-mssr",
1023 .data = &r8a77990_cpg_mssr_info,
1024 },
1025 #endif
1026 #ifdef CONFIG_CLK_R8A77995
1027 {
1028 .compatible = "renesas,r8a77995-cpg-mssr",
1029 .data = &r8a77995_cpg_mssr_info,
1030 },
1031 #endif
1032 #ifdef CONFIG_CLK_R8A779A0
1033 {
1034 .compatible = "renesas,r8a779a0-cpg-mssr",
1035 .data = &r8a779a0_cpg_mssr_info,
1036 },
1037 #endif
1038 #ifdef CONFIG_CLK_R8A779F0
1039 {
1040 .compatible = "renesas,r8a779f0-cpg-mssr",
1041 .data = &r8a779f0_cpg_mssr_info,
1042 },
1043 #endif
1044 #ifdef CONFIG_CLK_R8A779G0
1045 {
1046 .compatible = "renesas,r8a779g0-cpg-mssr",
1047 .data = &r8a779g0_cpg_mssr_info,
1048 },
1049 #endif
1050 #ifdef CONFIG_CLK_R8A779H0
1051 {
1052 .compatible = "renesas,r8a779h0-cpg-mssr",
1053 .data = &r8a779h0_cpg_mssr_info,
1054 },
1055 #endif
1056 #ifdef CONFIG_CLK_R9A09G077
1057 {
1058 .compatible = "renesas,r9a09g077-cpg-mssr",
1059 .data = &r9a09g077_cpg_mssr_info,
1060 },
1061 #endif
1062 #ifdef CONFIG_CLK_R9A09G087
1063 {
1064 .compatible = "renesas,r9a09g087-cpg-mssr",
1065 .data = &r9a09g077_cpg_mssr_info,
1066 },
1067 #endif
1068 { /* sentinel */ }
1069 };
1070
static void cpg_mssr_del_clk_provider(void *data)
1072 {
1073 of_clk_del_provider(data);
1074 }
1075
1076 #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM_PSCI_FW)
static int cpg_mssr_suspend_noirq(struct device *dev)
1078 {
1079 struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
1080 unsigned int reg;
1081
1082 /* This is the best we can do to check for the presence of PSCI */
1083 if (!psci_ops.cpu_suspend)
1084 return 0;
1085
1086 /* Save module registers with bits under our control */
1087 for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
1088 if (priv->smstpcr_saved[reg].mask)
1089 priv->smstpcr_saved[reg].val =
1090 priv->reg_layout == CLK_REG_LAYOUT_RZ_A ?
1091 readb(priv->pub.base0 + priv->control_regs[reg]) :
1092 readl(priv->pub.base0 + priv->control_regs[reg]);
1093 }
1094
1095 /* Save core clocks */
1096 raw_notifier_call_chain(&priv->pub.notifiers, PM_EVENT_SUSPEND, NULL);
1097
1098 return 0;
1099 }
1100
static int cpg_mssr_resume_noirq(struct device *dev)
1102 {
1103 struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
1104 unsigned int reg;
1105 u32 mask, oldval, newval;
1106 int error;
1107
1108 /* This is the best we can do to check for the presence of PSCI */
1109 if (!psci_ops.cpu_suspend)
1110 return 0;
1111
1112 /* Restore core clocks */
1113 raw_notifier_call_chain(&priv->pub.notifiers, PM_EVENT_RESUME, NULL);
1114
1115 /* Restore module clocks */
1116 for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
1117 mask = priv->smstpcr_saved[reg].mask;
1118 if (!mask)
1119 continue;
1120
1121 if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
1122 oldval = readb(priv->pub.base0 + priv->control_regs[reg]);
1123 else
1124 oldval = readl(priv->pub.base0 + priv->control_regs[reg]);
1125 newval = oldval & ~mask;
1126 newval |= priv->smstpcr_saved[reg].val & mask;
1127 if (newval == oldval)
1128 continue;
1129
1130 if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
1131 writeb(newval, priv->pub.base0 + priv->control_regs[reg]);
1132 /* dummy read to ensure write has completed */
1133 readb(priv->pub.base0 + priv->control_regs[reg]);
1134 barrier_data(priv->pub.base0 + priv->control_regs[reg]);
1135 continue;
1136 } else
1137 writel(newval, priv->pub.base0 + priv->control_regs[reg]);
1138
1139 /* Wait until enabled clocks are really enabled */
1140 mask &= ~priv->smstpcr_saved[reg].val;
1141 if (!mask)
1142 continue;
1143
1144 error = readl_poll_timeout_atomic(priv->pub.base0 + priv->status_regs[reg],
1145 oldval, !(oldval & mask), 0, 10);
1146 if (error)
1147 dev_warn(dev, "Failed to enable SMSTP%u[0x%x]\n", reg,
1148 oldval & mask);
1149 }
1150
1151 return 0;
1152 }
1153
1154 static const struct dev_pm_ops cpg_mssr_pm = {
1155 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cpg_mssr_suspend_noirq,
1156 cpg_mssr_resume_noirq)
1157 };
1158 #define DEV_PM_OPS &cpg_mssr_pm
1159 #else
1160 #define DEV_PM_OPS NULL
1161 #endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */
1162
static void __init cpg_mssr_reserved_exit(struct cpg_mssr_priv *priv)
1164 {
1165 kfree(priv->reserved_ids);
1166 }
1167
static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
					 const struct cpg_mssr_info *info)
1170 {
1171 struct device_node *soc __free(device_node) = of_find_node_by_path("/soc");
1172 struct device_node *node;
1173 uint32_t args[MAX_PHANDLE_ARGS];
1174 unsigned int *ids = NULL;
1175 unsigned int num = 0;
1176
1177 /*
* Because clk_disable_unused() will disable all unused clocks, a device which is assigned
* to a non-Linux system would have its module clock disabled when Linux is booted.
*
* To avoid such a situation, renesas-cpg-mssr assumes that a device which has
* status = "reserved" is assigned to a non-Linux system, and adds the CLK_IGNORE_UNUSED
* flag to its CPG_MOD clocks.
1184 * see also
1185 * cpg_mssr_register_mod_clk()
1186 *
1187 * scif5: serial@e6f30000 {
1188 * ...
1189 * => clocks = <&cpg CPG_MOD 202>,
1190 * <&cpg CPG_CORE R8A7795_CLK_S3D1>,
1191 * <&scif_clk>;
1192 * ...
1193 * status = "reserved";
1194 * };
1195 */
1196 for_each_reserved_child_of_node(soc, node) {
1197 struct of_phandle_iterator it;
1198 int rc;
1199
1200 of_for_each_phandle(&it, rc, node, "clocks", "#clock-cells", -1) {
1201 int idx;
1202 unsigned int *new_ids;
1203
1204 if (it.node != priv->np)
1205 continue;
1206
1207 if (of_phandle_iterator_args(&it, args, MAX_PHANDLE_ARGS) != 2)
1208 continue;
1209
1210 if (args[0] != CPG_MOD)
1211 continue;
1212
1213 new_ids = krealloc_array(ids, (num + 1), sizeof(*ids), GFP_KERNEL);
1214 if (!new_ids) {
1215 of_node_put(it.node);
1216 kfree(ids);
1217 return -ENOMEM;
1218 }
1219 ids = new_ids;
1220
1221 if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
1222 idx = MOD_CLK_PACK_10(args[1]); /* for DEF_MOD_STB() */
1223 else
1224 idx = MOD_CLK_PACK(args[1]); /* for DEF_MOD() */
1225
1226 ids[num] = info->num_total_core_clks + idx;
1227
1228 num++;
1229 }
1230 }
1231
1232 priv->num_reserved_ids = num;
1233 priv->reserved_ids = ids;
1234
1235 return 0;
1236 }
1237
static int __init cpg_mssr_common_init(struct device *dev,
				       struct device_node *np,
				       const struct cpg_mssr_info *info)
1241 {
1242 struct cpg_mssr_priv *priv;
1243 unsigned int nclks, i;
1244 int error;
1245
1246 if (info->init) {
1247 error = info->init(dev);
1248 if (error)
1249 return error;
1250 }
1251
1252 nclks = info->num_total_core_clks + info->num_hw_mod_clks;
1253 priv = kzalloc(struct_size(priv, clks, nclks), GFP_KERNEL);
1254 if (!priv)
1255 return -ENOMEM;
1256
1257 priv->pub.clks = priv->clks;
1258 priv->np = np;
1259 priv->dev = dev;
1260 spin_lock_init(&priv->pub.rmw_lock);
1261
1262 priv->pub.base0 = of_iomap(np, 0);
1263 if (!priv->pub.base0) {
1264 error = -ENOMEM;
1265 goto out_err;
1266 }
1267 if (info->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
1268 priv->pub.base1 = of_iomap(np, 1);
1269 if (!priv->pub.base1) {
1270 error = -ENOMEM;
1271 goto out_err;
1272 }
1273 }
1274
1275 priv->num_core_clks = info->num_total_core_clks;
1276 priv->num_mod_clks = info->num_hw_mod_clks;
1277 priv->last_dt_core_clk = info->last_dt_core_clk;
1278 RAW_INIT_NOTIFIER_HEAD(&priv->pub.notifiers);
1279 priv->reg_layout = info->reg_layout;
1280 if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN2_AND_GEN3) {
1281 priv->status_regs = mstpsr;
1282 priv->control_regs = smstpcr;
1283 priv->reset_regs = srcr;
1284 priv->reset_clear_regs = srstclr;
1285 } else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
1286 priv->control_regs = stbcr;
1287 } else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
1288 priv->control_regs = mstpcr_for_rzt2h;
1289 priv->reset_regs = mrcr_for_rzt2h;
1290 } else if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN4) {
1291 priv->status_regs = mstpsr_for_gen4;
1292 priv->control_regs = mstpcr_for_gen4;
1293 priv->reset_regs = srcr_for_gen4;
1294 priv->reset_clear_regs = srstclr_for_gen4;
1295 } else {
1296 error = -EINVAL;
1297 goto out_err;
1298 }
1299
1300 for (i = 0; i < nclks; i++)
1301 priv->pub.clks[i] = ERR_PTR(-ENOENT);
1302
1303 error = cpg_mssr_reserved_init(priv, info);
1304 if (error)
1305 goto out_err;
1306
1307 error = of_clk_add_provider(np, cpg_mssr_clk_src_twocell_get, priv);
1308 if (error)
1309 goto reserve_err;
1310
1311 cpg_mssr_priv = priv;
1312
1313 return 0;
1314
1315 reserve_err:
1316 cpg_mssr_reserved_exit(priv);
1317 out_err:
1318 if (priv->pub.base0)
1319 iounmap(priv->pub.base0);
1320 if (priv->pub.base1)
1321 iounmap(priv->pub.base1);
1322 kfree(priv);
1323
1324 return error;
1325 }
1326
void __init cpg_mssr_early_init(struct device_node *np,
				const struct cpg_mssr_info *info)
1329 {
1330 int error;
1331 int i;
1332
1333 error = cpg_mssr_common_init(NULL, np, info);
1334 if (error)
1335 return;
1336
1337 for (i = 0; i < info->num_early_core_clks; i++)
1338 cpg_mssr_register_core_clk(&info->early_core_clks[i], info,
1339 cpg_mssr_priv);
1340
1341 for (i = 0; i < info->num_early_mod_clks; i++)
1342 cpg_mssr_register_mod_clk(&info->early_mod_clks[i], info,
1343 cpg_mssr_priv);
1344
1345 }
1346
static int __init cpg_mssr_probe(struct platform_device *pdev)
1348 {
1349 struct device *dev = &pdev->dev;
1350 struct device_node *np = dev->of_node;
1351 const struct cpg_mssr_info *info;
1352 struct cpg_mssr_priv *priv;
1353 unsigned int i;
1354 int error;
1355
1356 info = of_device_get_match_data(dev);
1357
1358 if (!cpg_mssr_priv) {
1359 error = cpg_mssr_common_init(dev, dev->of_node, info);
1360 if (error)
1361 return error;
1362 }
1363
1364 priv = cpg_mssr_priv;
1365 priv->dev = dev;
1366 dev_set_drvdata(dev, priv);
1367
1368 for (i = 0; i < info->num_core_clks; i++)
1369 cpg_mssr_register_core_clk(&info->core_clks[i], info, priv);
1370
1371 for (i = 0; i < info->num_mod_clks; i++)
1372 cpg_mssr_register_mod_clk(&info->mod_clks[i], info, priv);
1373
1374 error = devm_add_action_or_reset(dev,
1375 cpg_mssr_del_clk_provider,
1376 np);
1377 if (error)
1378 goto reserve_exit;
1379
1380 error = cpg_mssr_add_clk_domain(dev, info->core_pm_clks,
1381 info->num_core_pm_clks);
1382 if (error)
1383 goto reserve_exit;
1384
1385 /* Reset Controller not supported for Standby Control SoCs */
1386 if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
1387 goto reserve_exit;
1388
1389 error = cpg_mssr_reset_controller_register(priv);
1390
1391 reserve_exit:
1392 cpg_mssr_reserved_exit(priv);
1393
1394 return error;
1395 }
1396
1397 static struct platform_driver cpg_mssr_driver = {
1398 .driver = {
1399 .name = "renesas-cpg-mssr",
1400 .of_match_table = cpg_mssr_match,
1401 .pm = DEV_PM_OPS,
1402 },
1403 };
1404
static int __init cpg_mssr_init(void)
1406 {
1407 return platform_driver_probe(&cpg_mssr_driver, cpg_mssr_probe);
1408 }
1409
1410 subsys_initcall(cpg_mssr_init);
1411
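/*
 * NULLify the names of the module clocks listed in clks[] so that
 * cpg_mssr_register_mod_clk() skips them; clks[] must be ordered
 * consistently with mod_clks[].
 */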
void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
			     unsigned int num_mod_clks,
			     const unsigned int *clks, unsigned int n)
1415 {
1416 unsigned int i, j;
1417
1418 for (i = 0, j = 0; i < num_mod_clks && j < n; i++)
1419 if (mod_clks[i].id == clks[j]) {
1420 mod_clks[i].name = NULL;
1421 j++;
1422 }
1423 }
1424
1425 MODULE_DESCRIPTION("Renesas CPG/MSSR Driver");
1426