// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2013 Freescale Semiconductor, Inc.
 * Copyright 2021 NXP
 *
 * clock driver for Freescale QorIQ SoCs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/fsl/guts.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define PLL_DIV1	0
#define PLL_DIV2	1
#define PLL_DIV3	2
#define PLL_DIV4	3

#define PLATFORM_PLL	0
#define CGA_PLL1	1
#define CGA_PLL2	2
#define CGA_PLL3	3
#define CGA_PLL4	4	/* only on clockgen-1.0, which lacks CGB */
#define CGB_PLL1	4
#define CGB_PLL2	5
#define MAX_PLL_DIV	32

struct clockgen_pll_div {
	struct clk *clk;
	char name[32];
};

struct clockgen_pll {
	struct clockgen_pll_div div[MAX_PLL_DIV];
};

#define CLKSEL_VALID	1
#define CLKSEL_80PCT	2	/* Only allowed if PLL <= 80% of max cpu freq */

struct clockgen_sourceinfo {
	u32 flags;	/* CLKSEL_xxx */
	int pll;	/* CGx_PLLn */
	int div;	/* PLL_DIVn */
};

#define NUM_MUX_PARENTS	16

struct clockgen_muxinfo {
	struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS];
};

#define NUM_HWACCEL	5
#define NUM_CMUX	8

struct clockgen;

/*
 * If this flag is set, the cmux frequency must be >= the platform PLL.
 * If not set, the cmux frequency must be >= platform PLL/2.
 */
#define CG_CMUX_GE_PLAT		1

#define CG_PLL_8BIT		2	/* PLLCnGSR[CFG] is 8 bits, not 6 */
#define CG_VER3			4	/* version 3 cg: reg layout different */
#define CG_LITTLE_ENDIAN	8

struct clockgen_chipinfo {
	const char *compat, *guts_compat;
	const struct clockgen_muxinfo *cmux_groups[2];
	const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
	void (*init_periph)(struct clockgen *cg);
	int cmux_to_group[NUM_CMUX + 1]; /* array should be -1 terminated */
	u32 pll_mask;	/* 1 << n bit set if PLL n is valid */
	u32 flags;	/* CG_xxx */
};

struct clockgen {
	struct device_node *node;
	void __iomem *regs;
	struct clockgen_chipinfo info; /* mutable copy */
	struct clk *sysclk, *coreclk;
	struct clockgen_pll pll[6];
	struct clk *cmux[NUM_CMUX];
	struct clk *hwaccel[NUM_HWACCEL];
	struct clk *fman[2];
	struct ccsr_guts __iomem *guts;
};

static struct clockgen clockgen;
static bool add_cpufreq_dev __initdata;

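/*
 * Register accessors that honour the block's endianness: blocks flagged
 * CG_LITTLE_ENDIAN use little-endian MMIO, all others are big-endian.
 */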
static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg)
{
	if (cg->info.flags & CG_LITTLE_ENDIAN)
		iowrite32(val, reg);
	else
		iowrite32be(val, reg);
}

static u32 cg_in(struct clockgen *cg, u32 __iomem *reg)
{
	u32 val;

	if (cg->info.flags & CG_LITTLE_ENDIAN)
		val = ioread32(reg);
	else
		val = ioread32be(reg);

	return val;
}

static const struct clockgen_muxinfo p2041_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p2041_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		[8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		[9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		[12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 },
		[13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1040_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};


static const struct clockgen_muxinfo clockgen2_cmux_cga = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cga12 = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
	{
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo ls1021a_cmux = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
	}
};

static const struct clockgen_muxinfo ls1028a_hwa1 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1028a_hwa2 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1028a_hwa3 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1028a_hwa4 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa1 = {
	{
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1046a_hwa1 = {
	{
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1046a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo ls1088a_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1088a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1012a_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{},
		[2] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t1023_hwa2 = {
	{
		[6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t2080_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t2080_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa1 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa4 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t4240_hwa5 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
	},
};

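/*
 * Reset Configuration Word status bits (RCWSR7) used by the init_periph
 * callbacks below to select the FMan clock parent and divider.
 */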
#define RCWSR7_FM1_CLK_SEL	0x40000000
#define RCWSR7_FM2_CLK_SEL	0x20000000
#define RCWSR7_HWA_ASYNC_DIV	0x04000000

static void __init p2041_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p4080_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5020_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5040_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init t1023_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[1];
}

static void __init t1040_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk;
}

static void __init t2080_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[0];
}

static void __init t4240_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[3];
	cg->fman[1] = cg->hwaccel[4];
}

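/*
 * Per-SoC description of the clockgen block, matched against the
 * clockgen node's compatible string in _clockgen_init().
 */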
static const struct clockgen_chipinfo chipinfo[] = {
	{
		.compat = "fsl,b4420-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = BIT(PLATFORM_PLL) |
			    BIT(CGA_PLL1) | BIT(CGA_PLL2) | BIT(CGA_PLL3) |
			    BIT(CGB_PLL1) | BIT(CGB_PLL2),
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,b4860-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = BIT(PLATFORM_PLL) |
			    BIT(CGA_PLL1) | BIT(CGA_PLL2) | BIT(CGA_PLL3) |
			    BIT(CGB_PLL1) | BIT(CGB_PLL2),
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1021a-clockgen",
		.cmux_groups = {
			&ls1021a_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = BIT(PLATFORM_PLL) |
			    BIT(CGA_PLL1) | BIT(CGA_PLL2),
	},
	{
		.compat = "fsl,ls1028a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.hwaccel = {
			&ls1028a_hwa1, &ls1028a_hwa2,
			&ls1028a_hwa3, &ls1028a_hwa4
		},
		.cmux_to_group = {
			0, 0, 0, 0, -1
		},
		.pll_mask = BIT(PLATFORM_PLL) |
			    BIT(CGA_PLL1) | BIT(CGA_PLL2),
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,ls1043a-clockgen",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.hwaccel = {
			&ls1043a_hwa1, &ls1043a_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = BIT(PLATFORM_PLL) |
			    BIT(CGA_PLL1) | BIT(CGA_PLL2),
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1046a-clockgen",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.hwaccel = {
			&ls1046a_hwa1, &ls1046a_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = BIT(PLATFORM_PLL) |
			    BIT(CGA_PLL1) | BIT(CGA_PLL2),
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1088a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.hwaccel = {
			&ls1088a_hwa1, &ls1088a_hwa2
		},
		.cmux_to_group = {
			0, 0, -1
		},
		.pll_mask = BIT(PLATFORM_PLL) |
			    BIT(CGA_PLL1) | BIT(CGA_PLL2),
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,ls1012a-clockgen",
		.cmux_groups = {
			&ls1012a_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = BIT(PLATFORM_PLL) | BIT(CGA_PLL1),
	},
	{
		.compat = "fsl,ls2080a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = BIT(PLATFORM_PLL) |
			    BIT(CGA_PLL1) | BIT(CGA_PLL2) |
			    BIT(CGB_PLL1) | BIT(CGB_PLL2),
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,lx2160a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.cmux_to_group = {
			0, 0, 0, 0, 1, 1, 1, 1, -1
		},
		.pll_mask = BIT(PLATFORM_PLL) |
			    BIT(CGA_PLL1) | BIT(CGA_PLL2) |
			    BIT(CGB_PLL1) | BIT(CGB_PLL2),
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,p2041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = BIT(PLATFORM_PLL) |
			    BIT(CGA_PLL1) | BIT(CGA_PLL2),
	},
	{
		.compat = "fsl,p3041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = BIT(PLATFORM_PLL) |
			    BIT(CGA_PLL1) | BIT(CGA_PLL2),
	},
	{
		.compat = "fsl,p4080-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p4080_init_periph,
		.cmux_groups = {
			&p4080_cmux_grp1, &p4080_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 0, 0, 1, 1, 1, 1, -1
		},
		.pll_mask = BIT(PLATFORM_PLL) |
			    BIT(CGA_PLL1) | BIT(CGA_PLL2) |
			    BIT(CGA_PLL3) | BIT(CGA_PLL4),
	},
	{
		.compat = "fsl,p5020-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p5020_init_periph,
		.cmux_groups = {
			&p5020_cmux_grp1, &p5020_cmux_grp2
		},
		.cmux_to_group = {
			0, 1, -1
		},
		.pll_mask = BIT(PLATFORM_PLL) |
			    BIT(CGA_PLL1) | BIT(CGA_PLL2),
	},
	{
		.compat = "fsl,p5040-clockgen",
		.guts_compat = "fsl,p5040-device-config",
		.init_periph = p5040_init_periph,
		.cmux_groups = {
			&p5040_cmux_grp1, &p5040_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = BIT(PLATFORM_PLL) |
			    BIT(CGA_PLL1) | BIT(CGA_PLL2) | BIT(CGA_PLL3),
	},
	{
		.compat = "fsl,t1023-clockgen",
		.guts_compat = "fsl,t1023-device-config",
		.init_periph = t1023_init_periph,
		.cmux_groups = {
			&t1023_cmux
		},
		.hwaccel = {
			&t1023_hwa1, &t1023_hwa2
		},
		.cmux_to_group = {
			0, 0, -1
		},
		.pll_mask = BIT(PLATFORM_PLL) | BIT(CGA_PLL1),
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t1040-clockgen",
		.guts_compat = "fsl,t1040-device-config",
		.init_periph = t1040_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.cmux_to_group = {
			0, 0, 0, 0, -1
		},
		.pll_mask = BIT(PLATFORM_PLL) |
			    BIT(CGA_PLL1) | BIT(CGA_PLL2),
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t2080-clockgen",
		.guts_compat = "fsl,t2080-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.hwaccel = {
			&t2080_hwa1, &t2080_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = BIT(PLATFORM_PLL) |
			    BIT(CGA_PLL1) | BIT(CGA_PLL2),
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t4240-clockgen",
		.guts_compat = "fsl,t4240-device-config",
		.init_periph = t4240_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5
		},
		.cmux_to_group = {
			0, 0, 1, -1
		},
		.pll_mask = BIT(PLATFORM_PLL) |
			    BIT(CGA_PLL1) | BIT(CGA_PLL2) | BIT(CGA_PLL3) |
			    BIT(CGB_PLL1) | BIT(CGB_PLL2),
		.flags = CG_PLL_8BIT,
	},
	{},
};

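/*
 * Runtime state for one mux clock: maps clk framework parent indices to
 * hardware clksel values and back (-1 marks an unusable clksel).
 */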
struct mux_hwclock {
	struct clk_hw hw;
	struct clockgen *cg;
	const struct clockgen_muxinfo *info;
	u32 __iomem *reg;
	u8 parent_to_clksel[NUM_MUX_PARENTS];
	s8 clksel_to_parent[NUM_MUX_PARENTS];
	int num_parents;
};

#define to_mux_hwclock(p)	container_of(p, struct mux_hwclock, hw)
#define CLKSEL_MASK		0x78000000
#define CLKSEL_SHIFT		27

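/*
 * mux_set_parent()/mux_get_parent() translate between clk parent indices
 * and the hardware CLKSEL field (bits 30:27 of the mux's CSR register).
 */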
static int mux_set_parent(struct clk_hw *hw, u8 idx)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;

	if (idx >= hwc->num_parents)
		return -EINVAL;

	clksel = hwc->parent_to_clksel[idx];
	cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg);

	return 0;
}

static u8 mux_get_parent(struct clk_hw *hw)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;
	s8 ret;

	clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;

	ret = hwc->clksel_to_parent[clksel];
	if (ret < 0) {
		pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg);
		return 0;
	}

	return ret;
}

static const struct clk_ops cmux_ops = {
	.determine_rate = clk_hw_determine_rate_no_reparent,
	.get_parent = mux_get_parent,
	.set_parent = mux_set_parent,
};

/*
 * Don't allow setting for now, as the clock options haven't been
 * sanitized for additional restrictions.
 */
static const struct clk_ops hwaccel_ops = {
	.get_parent = mux_get_parent,
};

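/*
 * Look up the PLL divider clock behind a given clksel entry, or NULL if
 * that selection is not valid for this mux.
 */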
static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg,
						  struct mux_hwclock *hwc,
						  int idx)
{
	int pll, div;

	if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID))
		return NULL;

	pll = hwc->info->clksel[idx].pll;
	div = hwc->info->clksel[idx].div;

	return &cg->pll[pll].div[div];
}

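/*
 * Register one mux clock.  Clksel entries whose rate violates the
 * min/max or 80%-of-max constraints are left out of the parent list.
 */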
static struct clk * __init create_mux_common(struct clockgen *cg,
					     struct mux_hwclock *hwc,
					     const struct clk_ops *ops,
					     unsigned long min_rate,
					     unsigned long max_rate,
					     unsigned long pct80_rate,
					     const char *fmt, int idx)
{
	struct clk_init_data init = {};
	struct clk *clk;
	const struct clockgen_pll_div *div;
	const char *parent_names[NUM_MUX_PARENTS];
	char name[32];
	int i, j;

	snprintf(name, sizeof(name), fmt, idx);

	for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) {
		unsigned long rate;

		hwc->clksel_to_parent[i] = -1;

		div = get_pll_div(cg, hwc, i);
		if (!div)
			continue;

		rate = clk_get_rate(div->clk);

		if (hwc->info->clksel[i].flags & CLKSEL_80PCT &&
		    rate > pct80_rate)
			continue;
		if (rate < min_rate)
			continue;
		if (rate > max_rate)
			continue;

		parent_names[j] = div->name;
		hwc->parent_to_clksel[j] = i;
		hwc->clksel_to_parent[i] = j;
		j++;
	}

	init.name = name;
	init.ops = ops;
	init.parent_names = parent_names;
	init.num_parents = hwc->num_parents = j;
	init.flags = 0;
	hwc->hw.init = &init;
	hwc->cg = cg;

	clk = clk_register(NULL, &hwc->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));
		kfree(hwc);
		return NULL;
	}

	return clk;
}

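/*
 * Core-cluster mux (CLKCnCSR).  On CG_VER3 blocks these registers live
 * at offset 0x70000; older blocks start at offset 0.
 */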
static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;
	const struct clockgen_pll_div *div;
	unsigned long plat_rate, min_rate;
	u64 max_rate, pct80_rate;
	u32 clksel;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	if (cg->info.flags & CG_VER3)
		hwc->reg = cg->regs + 0x70000 + 0x20 * idx;
	else
		hwc->reg = cg->regs + 0x20 * idx;

	hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];

	/*
	 * Find the rate for the default clksel, and treat it as the
	 * maximum rated core frequency.  If this is an incorrect
	 * assumption, certain clock options (possibly including the
	 * default clksel) may be inappropriately excluded on certain
	 * chips.
	 */
	clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
	div = get_pll_div(cg, hwc, clksel);
	if (!div) {
		kfree(hwc);
		return NULL;
	}

	max_rate = clk_get_rate(div->clk);
	pct80_rate = max_rate * 8;
	do_div(pct80_rate, 10);

	plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);

	if (cg->info.flags & CG_CMUX_GE_PLAT)
		min_rate = plat_rate;
	else
		min_rate = plat_rate / 2;

	return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
				 pct80_rate, "cg-cmux%d", idx);
}

static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	hwc->reg = cg->regs + 0x20 * idx + 0x10;
	hwc->info = cg->info.hwaccel[idx];

	return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
				 "cg-hwaccel%d", idx);
}

static void __init create_muxes(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) {
		if (cg->info.cmux_to_group[i] < 0)
			break;
		if (cg->info.cmux_to_group[i] >=
		    ARRAY_SIZE(cg->info.cmux_groups)) {
			WARN_ON_ONCE(1);
			continue;
		}

		cg->cmux[i] = create_one_cmux(cg, i);
	}

	for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) {
		if (!cg->info.hwaccel[i])
			continue;

		cg->hwaccel[i] = create_one_hwaccel(cg, i);
	}
}

static void __init _clockgen_init(struct device_node *np, bool legacy);

/*
 * Legacy nodes may get probed before the parent clockgen node.
 * It is assumed that device trees with legacy nodes will not
 * contain a "clocks" property -- otherwise the input clocks may
 * not be initialized at this point.
 */
static void __init legacy_init_clockgen(struct device_node *np)
{
	if (!clockgen.node) {
		struct device_node *parent_np __free(device_node) = of_get_parent(np);

		_clockgen_init(parent_np, true);
	}
}

/* Legacy node */
static void __init core_mux_init(struct device_node *np)
{
	struct clk *clk;
	struct resource res;
	int idx, rc;

	legacy_init_clockgen(np);

	if (of_address_to_resource(np, 0, &res))
		return;

	idx = (res.start & 0xf0) >> 5;
	clk = clockgen.cmux[idx];

	rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
		       __func__, np, rc);
		return;
	}
}

static struct clk __init
*sysclk_from_fixed(struct device_node *node, const char *name)
{
	u32 rate;

	if (of_property_read_u32(node, "clock-frequency", &rate))
		return ERR_PTR(-ENODEV);

	return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}

static struct clk __init *input_clock(const char *name, struct clk *clk)
{
	const char *input_name;

	/* Register the input clock under the desired name. */
	input_name = __clk_get_name(clk);
	clk = clk_register_fixed_factor(NULL, name, input_name,
					0, 1, 1);
	if (IS_ERR(clk))
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));

	return clk;
}

static struct clk __init *input_clock_by_name(const char *name,
					      const char *dtname)
{
	struct clk *clk;

	clk = of_clk_get_by_name(clockgen.node, dtname);
	if (IS_ERR(clk))
		return clk;

	return input_clock(name, clk);
}

static struct clk __init *input_clock_by_index(const char *name, int idx)
{
	struct clk *clk;

	clk = of_clk_get(clockgen.node, 0);
	if (IS_ERR(clk))
		return clk;

	return input_clock(name, clk);
}

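/*
 * Find the input clock, trying in order: a clock-frequency property on
 * the clockgen node, a "sysclk" entry in its clocks property, the first
 * clocks entry, and finally a legacy "sysclk" child node.
 */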
static struct clk * __init create_sysclk(const char *name)
{
	struct device_node *sysclk;
	struct clk *clk;

	clk = sysclk_from_fixed(clockgen.node, name);
	if (!IS_ERR(clk))
		return clk;

	clk = input_clock_by_name(name, "sysclk");
	if (!IS_ERR(clk))
		return clk;

	clk = input_clock_by_index(name, 0);
	if (!IS_ERR(clk))
		return clk;

	sysclk = of_get_child_by_name(clockgen.node, "sysclk");
	if (sysclk) {
		clk = sysclk_from_fixed(sysclk, name);
		of_node_put(sysclk);
		if (!IS_ERR(clk))
			return clk;
	}

	pr_err("%s: No input sysclk\n", __func__);
	return NULL;
}

static struct clk * __init create_coreclk(const char *name)
{
	struct clk *clk;

	clk = input_clock_by_name(name, "coreclk");
	if (!IS_ERR(clk))
		return clk;

	/*
	 * This indicates a mix of legacy nodes with the new coreclk
	 * mechanism, which should never happen.  If this error occurs,
	 * don't use the wrong input clock just because coreclk isn't
	 * ready yet.
	 */
	if (WARN_ON(PTR_ERR(clk) == -EPROBE_DEFER))
		return clk;

	return NULL;
}

/* Legacy node */
static void __init sysclk_init(struct device_node *node)
{
	struct clk *clk;

	legacy_init_clockgen(node);

	clk = clockgen.sysclk;
	if (clk)
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
}

#define PLL_KILL BIT(31)

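/*
 * Register the divider clocks ("cg-pllN-divM") of one PLL as fixed
 * factors of the input clock, using the multiplier read from the PLL's
 * control/status register.  Disabled PLLs (PLL_KILL set) are skipped.
 */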
static void __init create_one_pll(struct clockgen *cg, int idx)
{
	u32 __iomem *reg;
	u32 mult;
	struct clockgen_pll *pll = &cg->pll[idx];
	const char *input = "cg-sysclk";
	int i;

	if (!(cg->info.pll_mask & (1 << idx)))
		return;

	if (cg->coreclk && idx != PLATFORM_PLL) {
		if (IS_ERR(cg->coreclk))
			return;

		input = "cg-coreclk";
	}

	if (cg->info.flags & CG_VER3) {
		switch (idx) {
		case PLATFORM_PLL:
			reg = cg->regs + 0x60080;
			break;
		case CGA_PLL1:
			reg = cg->regs + 0x80;
			break;
		case CGA_PLL2:
			reg = cg->regs + 0xa0;
			break;
		case CGB_PLL1:
			reg = cg->regs + 0x10080;
			break;
		case CGB_PLL2:
			reg = cg->regs + 0x100a0;
			break;
		default:
			WARN_ONCE(1, "index %d\n", idx);
			return;
		}
	} else {
		if (idx == PLATFORM_PLL)
			reg = cg->regs + 0xc00;
		else
			reg = cg->regs + 0x800 + 0x20 * (idx - 1);
	}

	/* Get the multiple of PLL */
	mult = cg_in(cg, reg);

	/* Check if this PLL is disabled */
	if (mult & PLL_KILL) {
		pr_debug("%s(): pll %p disabled\n", __func__, reg);
		return;
	}

	if ((cg->info.flags & CG_VER3) ||
	    ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL))
		mult = (mult & GENMASK(8, 1)) >> 1;
	else
		mult = (mult & GENMASK(6, 1)) >> 1;

	for (i = 0; i < ARRAY_SIZE(pll->div); i++) {
		struct clk *clk;
		int ret;

		/*
		 * For platform PLL, there are MAX_PLL_DIV divider clocks.
		 * For core PLL, there are 4 divider clocks at most.
		 */
		if (idx != PLATFORM_PLL && i >= 4)
			break;

		snprintf(pll->div[i].name, sizeof(pll->div[i].name),
			 "cg-pll%d-div%d", idx, i + 1);

		clk = clk_register_fixed_factor(NULL,
				pll->div[i].name, input, 0, mult, i + 1);
		if (IS_ERR(clk)) {
			pr_err("%s: %s: register failed %ld\n",
			       __func__, pll->div[i].name, PTR_ERR(clk));
			continue;
		}

		pll->div[i].clk = clk;
		ret = clk_register_clkdev(clk, pll->div[i].name, NULL);
		if (ret != 0)
			pr_err("%s: %s: register to lookup table failed %d\n",
			       __func__, pll->div[i].name, ret);

	}
}

static void __init create_plls(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->pll); i++)
		create_one_pll(cg, i);
}

static void __init legacy_pll_init(struct device_node *np, int idx)
{
	struct clockgen_pll *pll;
	struct clk_onecell_data *onecell_data;
	struct clk **subclks;
	int count, rc;

	legacy_init_clockgen(np);

	pll = &clockgen.pll[idx];
	count = of_property_count_strings(np, "clock-output-names");

	BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4);
	subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL);
	if (!subclks)
		return;

	onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
	if (!onecell_data)
		goto err_clks;

	if (count <= 3) {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[3].clk;
	} else {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[2].clk;
		subclks[3] = pll->div[3].clk;
	}

	onecell_data->clks = subclks;
	onecell_data->clk_num = count;

	rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
		       __func__, np, rc);
		goto err_cell;
	}

	return;
err_cell:
	kfree(onecell_data);
err_clks:
	kfree(subclks);
}

/* Legacy node */
static void __init pltfrm_pll_init(struct device_node *np)
{
	legacy_pll_init(np, PLATFORM_PLL);
}

/* Legacy node */
static void __init core_pll_init(struct device_node *np)
{
	struct resource res;
	int idx;

	if (of_address_to_resource(np, 0, &res))
		return;

	if ((res.start & 0xfff) == 0xc00) {
		/*
		 * ls1021a devtree labels the platform PLL
		 * with the core PLL compatible
		 */
		pltfrm_pll_init(np);
	} else {
		idx = (res.start & 0xf0) >> 5;
		legacy_pll_init(np, CGA_PLL1 + idx);
	}
}

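/*
 * Clock provider callback: the two-cell specifier is <type index>, with
 * the type values defined in dt-bindings/clock/fsl,qoriq-clockgen.h.
 */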
static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
{
	struct clockgen *cg = data;
	struct clk *clk;
	struct clockgen_pll *pll;
	u32 type, idx;

	if (clkspec->args_count < 2) {
		pr_err("%s: insufficient phandle args\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	type = clkspec->args[0];
	idx = clkspec->args[1];

	switch (type) {
	case QORIQ_CLK_SYSCLK:
		if (idx != 0)
			goto bad_args;
		clk = cg->sysclk;
		break;
	case QORIQ_CLK_CMUX:
		if (idx >= ARRAY_SIZE(cg->cmux))
			goto bad_args;
		clk = cg->cmux[idx];
		break;
	case QORIQ_CLK_HWACCEL:
		if (idx >= ARRAY_SIZE(cg->hwaccel))
			goto bad_args;
		clk = cg->hwaccel[idx];
		break;
	case QORIQ_CLK_FMAN:
		if (idx >= ARRAY_SIZE(cg->fman))
			goto bad_args;
		clk = cg->fman[idx];
		break;
	case QORIQ_CLK_PLATFORM_PLL:
		pll = &cg->pll[PLATFORM_PLL];
		if (idx >= ARRAY_SIZE(pll->div))
			goto bad_args;
		clk = pll->div[idx].clk;
		break;
	case QORIQ_CLK_CORECLK:
		if (idx != 0)
			goto bad_args;
		clk = cg->coreclk;
		if (IS_ERR(clk))
			clk = NULL;
		break;
	default:
		goto bad_args;
	}

	if (!clk)
		return ERR_PTR(-ENOENT);
	return clk;

bad_args:
	pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx);
	return ERR_PTR(-EINVAL);
}

#ifdef CONFIG_PPC
#include <asm/mpc85xx.h>

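/* SVRs of the parts affected by erratum A-004510; see CG_CMUX_GE_PLAT. */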
static const u32 a4510_svrs[] __initconst = {
	(SVR_P2040 << 8) | 0x10,	/* P2040 1.0 */
	(SVR_P2040 << 8) | 0x11,	/* P2040 1.1 */
	(SVR_P2041 << 8) | 0x10,	/* P2041 1.0 */
	(SVR_P2041 << 8) | 0x11,	/* P2041 1.1 */
	(SVR_P3041 << 8) | 0x10,	/* P3041 1.0 */
	(SVR_P3041 << 8) | 0x11,	/* P3041 1.1 */
	(SVR_P4040 << 8) | 0x20,	/* P4040 2.0 */
	(SVR_P4080 << 8) | 0x20,	/* P4080 2.0 */
	(SVR_P5010 << 8) | 0x10,	/* P5010 1.0 */
	(SVR_P5010 << 8) | 0x20,	/* P5010 2.0 */
	(SVR_P5020 << 8) | 0x10,	/* P5020 1.0 */
	(SVR_P5021 << 8) | 0x10,	/* P5021 1.0 */
	(SVR_P5040 << 8) | 0x10,	/* P5040 1.0 */
};

#define SVR_SECURITY	0x80000	/* The Security (E) bit */

static bool __init has_erratum_a4510(void)
{
	u32 svr = mfspr(SPRN_SVR);
	int i;

	svr &= ~SVR_SECURITY;

	for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) {
		if (svr == a4510_svrs[i])
			return true;
	}

	return false;
}
#else
static bool __init has_erratum_a4510(void)
{
	return false;
}
#endif

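/*
 * Main initialization: map the clockgen registers, match the chipinfo
 * entry for this SoC, create the sysclk/coreclk inputs, PLLs and muxes,
 * then register the clock provider.
 */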
static void __init _clockgen_init(struct device_node *np, bool legacy)
{
	int i, ret;
	bool is_old_ls1021a = false;

	/* May have already been called by a legacy probe */
	if (clockgen.node)
		return;

	clockgen.node = np;
	clockgen.regs = of_iomap(np, 0);
	if (!clockgen.regs &&
	    of_device_is_compatible(of_root, "fsl,ls1021a")) {
		/* Compatibility hack for old, broken device trees */
		clockgen.regs = ioremap(0x1ee1000, 0x1000);
		is_old_ls1021a = true;
	}
	if (!clockgen.regs) {
		pr_err("%s(): %pOFn: of_iomap() failed\n", __func__, np);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(chipinfo); i++) {
		if (of_device_is_compatible(np, chipinfo[i].compat))
			break;
		if (is_old_ls1021a &&
		    !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen"))
			break;
	}

	if (i == ARRAY_SIZE(chipinfo)) {
		pr_err("%s: unknown clockgen node %pOF\n", __func__, np);
		goto err;
	}
	clockgen.info = chipinfo[i];

	if (clockgen.info.guts_compat) {
		struct device_node *guts;

		guts = of_find_compatible_node(NULL, NULL,
					       clockgen.info.guts_compat);
		if (guts) {
			clockgen.guts = of_iomap(guts, 0);
			if (!clockgen.guts) {
				pr_err("%s: Couldn't map %pOF regs\n", __func__,
				       guts);
			}
			of_node_put(guts);
		}

	}

	if (has_erratum_a4510())
		clockgen.info.flags |= CG_CMUX_GE_PLAT;

	clockgen.sysclk = create_sysclk("cg-sysclk");
	clockgen.coreclk = create_coreclk("cg-coreclk");
	create_plls(&clockgen);
	create_muxes(&clockgen);

	if (clockgen.info.init_periph)
		clockgen.info.init_periph(&clockgen);

	ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen);
	if (ret) {
		pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
		       __func__, np, ret);
	}

	/* Don't create cpufreq device for legacy clockgen blocks */
	add_cpufreq_dev = !legacy;

	return;
err:
	iounmap(clockgen.regs);
	clockgen.regs = NULL;
}

static void __init clockgen_init(struct device_node *np)
{
	_clockgen_init(np, false);
}

static int __init clockgen_cpufreq_init(void)
{
	struct platform_device *pdev;

	if (add_cpufreq_dev) {
		pdev = platform_device_register_simple("qoriq-cpufreq", -1,
						       NULL, 0);
		if (IS_ERR(pdev))
			pr_err("Couldn't register qoriq-cpufreq err=%ld\n",
			       PTR_ERR(pdev));
	}
	return 0;
}
device_initcall(clockgen_cpufreq_init);

CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_b4420, "fsl,b4420-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_b4860, "fsl,b4860-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1028a, "fsl,ls1028a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_lx2160a, "fsl,lx2160a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p2041, "fsl,p2041-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p3041, "fsl,p3041-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p4080, "fsl,p4080-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p5020, "fsl,p5020-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p5040, "fsl,p5040-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t1023, "fsl,t1023-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t1040, "fsl,t1040-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t2080, "fsl,t2080-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t4240, "fsl,t4240-clockgen", clockgen_init);

/* Legacy nodes */
CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);