// SPDX-License-Identifier: GPL-2.0
/*
 * Alchemy clocks.
 *
 * Exposes all configurable internal clock sources to the clk framework.
 *
 * We have:
 *  - Root source, usually 12MHz supplied by an external crystal
 *  - 3 PLLs which generate multiples of root rate [AUX, CPU, AUX2]
 *
 * Dividers:
 *  - 6 clock dividers with:
 *    * selectable source [one of the PLLs],
 *    * output divided between [2 .. 512 in steps of 2] (!Au1300)
 *      or [1 .. 256 in steps of 1] (Au1300),
 *    * can be enabled individually.
 *
 *  - up to 6 "internal" (fixed) consumers which:
 *    * take either AUXPLL or one of the above 6 dividers as input,
 *    * divide this input by 1, 2, or 4 (and 3 on Au1300),
 *    * can be disabled separately.
 *
 * Misc clocks:
 *  - sysbus clock: CPU core clock (CPUPLL) divided by 2, 3 or 4,
 *    depends on board design and should be set by bootloader, read-only.
 *  - peripheral clock: half the rate of sysbus clock, source for a lot
 *    of peripheral blocks, read-only.
 *  - memory clock: clk rate to main memory chips, depends on board
 *    design and is read-only,
 *  - lrclk: the static bus clock signal for synchronous operation,
 *    depends on board design, must be set by bootloader,
 *    but may be required to correctly configure devices attached to
 *    the static bus. The Au1000/1500/1100 manuals call it LCLK, on
 *    later models it's called RCLK.
 */
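
/*
 * Usage sketch (illustration only, not part of this driver): consumers
 * reach these clocks through the common clk API under the lookup names
 * registered below. "psc0_intclk" is one of the internal source names
 * from the tables further down; the requested rate is an arbitrary
 * example value.
 *
 *      struct clk *c = clk_get(NULL, "psc0_intclk");
 *
 *      if (!IS_ERR(c)) {
 *              clk_set_rate(c, 12288000);
 *              clk_prepare_enable(c);
 *              ...
 *              clk_disable_unprepare(c);
 *              clk_put(c);
 *      }
 */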

#include <linux/init.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/mach-au1x00/au1000.h>

/* Base clock: 12MHz is the default in all databooks, and I haven't
 * found any board yet which uses a different rate.
 */
#define ALCHEMY_ROOTCLK_RATE    12000000

/*
 * the internal sources which can be driven by the PLLs and dividers.
 * Names taken from the databooks, refer to them for more information,
 * especially which ones share a clock line.
 */
static const char * const alchemy_au1300_intclknames[] = {
        "lcd_intclk", "gpemgp_clk", "maempe_clk", "maebsa_clk",
        "EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1200_intclknames[] = {
        "lcd_intclk", NULL, NULL, NULL, "EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1550_intclknames[] = {
        "usb_clk", "psc0_intclk", "psc1_intclk", "pci_clko",
        "EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1100_intclknames[] = {
        "usb_clk", "lcd_intclk", NULL, "i2s_clk", "EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1500_intclknames[] = {
        NULL, "usbd_clk", "usbh_clk", "pci_clko", "EXTCLK0", "EXTCLK1"
};

static const char * const alchemy_au1000_intclknames[] = {
        "irda_clk", "usbd_clk", "usbh_clk", "i2s_clk", "EXTCLK0",
        "EXTCLK1"
};

/* aliases for a few on-chip sources which are either shared
 * or have gone through name changes.
 */
static struct clk_aliastable {
        char *alias;
        char *base;
        int cputype;
} alchemy_clk_aliases[] __initdata = {
        { "usbh_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
        { "usbd_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
        { "irda_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
        { "usbh_clk", "usb_clk", ALCHEMY_CPU_AU1550 },
        { "usbd_clk", "usb_clk", ALCHEMY_CPU_AU1550 },
        { "psc2_intclk", "usb_clk", ALCHEMY_CPU_AU1550 },
        { "psc3_intclk", "EXTCLK0", ALCHEMY_CPU_AU1550 },
        { "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1200 },
        { "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1200 },
        { "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
        { "psc2_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
        { "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },
        { "psc3_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },

        { NULL, NULL, 0 },
};
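
/*
 * Example of what the table above achieves (illustration only): on an
 * Au1100 the USB host, USB device and IrDA blocks are fed from the
 * single "usb_clk" divider output, so after alchemy_clk_init() has
 * added the aliases, clk_get(NULL, "usbh_clk") hands a driver the same
 * clock that was registered as "usb_clk".
 */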

#define IOMEM(x) ((void __iomem *)(KSEG1ADDR(CPHYSADDR(x))))

/* access locks to SYS_FREQCTRL0/1 and SYS_CLKSRC registers */
static spinlock_t alchemy_clk_fg0_lock;
static spinlock_t alchemy_clk_fg1_lock;
static DEFINE_SPINLOCK(alchemy_clk_csrc_lock);

/* CPU Core clock *****************************************************/

static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw,
                                            unsigned long parent_rate)
{
        unsigned long t;

        /*
         * On early Au1000, sys_cpupll was write-only. Since these
         * silicon versions of Au1000 are not sold, we don't bend
         * over backwards trying to determine the frequency.
         */
        if (unlikely(au1xxx_cpu_has_pll_wo()))
                t = 396000000;
        else {
                t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
                if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300)
                        t &= 0x3f;
                t *= parent_rate;
        }

        return t;
}
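
/*
 * Worked example (values assumed for illustration): with the default
 * 12 MHz root clock and a CPUPLL multiplier field of 33, the core runs
 * at 33 * 12000000 = 396 MHz, which matches the value hardcoded above
 * for parts whose sys_cpupll register cannot be read back.
 */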

void __init alchemy_set_lpj(void)
{
        preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE);
        preset_lpj /= 2 * HZ;
}

static const struct clk_ops alchemy_clkops_cpu = {
        .recalc_rate = alchemy_clk_cpu_recalc,
};

static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name,
                                                int ctype)
{
        struct clk_init_data id;
        struct clk_hw *h;
        struct clk *clk;

        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h)
                return ERR_PTR(-ENOMEM);

        id.name = ALCHEMY_CPU_CLK;
        id.parent_names = &parent_name;
        id.num_parents = 1;
        id.flags = 0;
        id.ops = &alchemy_clkops_cpu;
        h->init = &id;

        clk = clk_register(NULL, h);
        if (IS_ERR(clk)) {
                pr_err("failed to register clock\n");
                kfree(h);
        }

        return clk;
}

/* AUXPLLs ************************************************************/

struct alchemy_auxpll_clk {
        struct clk_hw hw;
        unsigned long reg;      /* au1300 has also AUXPLL2 */
        int maxmult;            /* max multiplier */
};
#define to_auxpll_clk(x) container_of(x, struct alchemy_auxpll_clk, hw)

static unsigned long alchemy_clk_aux_recalc(struct clk_hw *hw,
                                            unsigned long parent_rate)
{
        struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);

        return (alchemy_rdsys(a->reg) & 0xff) * parent_rate;
}

static int alchemy_clk_aux_setr(struct clk_hw *hw,
                                unsigned long rate,
                                unsigned long parent_rate)
{
        struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
        unsigned long d = rate;

        if (rate)
                d /= parent_rate;
        else
                d = 0;

        /* minimum is 84MHz, max is 756-1032 depending on variant */
        if (((d < 7) && (d != 0)) || (d > a->maxmult))
                return -EINVAL;

        alchemy_wrsys(d, a->reg);
        return 0;
}

static int alchemy_clk_aux_determine_rate(struct clk_hw *hw,
                                          struct clk_rate_request *req)
{
        struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
        unsigned long mult;

        if (!req->rate || !req->best_parent_rate) {
                req->rate = 0;

                return 0;
        }

        mult = req->rate / req->best_parent_rate;

        if (mult && (mult < 7))
                mult = 7;
        if (mult > a->maxmult)
                mult = a->maxmult;

        req->rate = req->best_parent_rate * mult;

        return 0;
}
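
/*
 * Worked example (rates assumed for illustration, 12 MHz root clock):
 * a request for 96 MHz yields the multiplier 96000000 / 12000000 = 8,
 * which both ops above accept. A request for 48 MHz would need a
 * multiplier of 4; alchemy_clk_aux_determine_rate() rounds that up to
 * the minimum of 7 (84 MHz), and alchemy_clk_aux_setr() rejects any
 * non-zero multiplier below that floor with -EINVAL.
 */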

static const struct clk_ops alchemy_clkops_aux = {
        .recalc_rate = alchemy_clk_aux_recalc,
        .set_rate = alchemy_clk_aux_setr,
        .determine_rate = alchemy_clk_aux_determine_rate,
};

static struct clk __init *alchemy_clk_setup_aux(const char *parent_name,
                                                char *name, int maxmult,
                                                unsigned long reg)
{
        struct clk_init_data id;
        struct clk *c;
        struct alchemy_auxpll_clk *a;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return ERR_PTR(-ENOMEM);

        id.name = name;
        id.parent_names = &parent_name;
        id.num_parents = 1;
        id.flags = CLK_GET_RATE_NOCACHE;
        id.ops = &alchemy_clkops_aux;

        a->reg = reg;
        a->maxmult = maxmult;
        a->hw.init = &id;

        c = clk_register(NULL, &a->hw);
        if (!IS_ERR(c))
                clk_register_clkdev(c, name, NULL);
        else
                kfree(a);

        return c;
}

/* sysbus_clk *********************************************************/

static struct clk __init *alchemy_clk_setup_sysbus(const char *pn)
{
        unsigned long v = (alchemy_rdsys(AU1000_SYS_POWERCTRL) & 3) + 2;
        struct clk *c;

        c = clk_register_fixed_factor(NULL, ALCHEMY_SYSBUS_CLK,
                                      pn, 0, 1, v);
        if (!IS_ERR(c))
                clk_register_clkdev(c, ALCHEMY_SYSBUS_CLK, NULL);
        return c;
}
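
/*
 * Worked example (register value assumed for illustration): with
 * SYS_POWERCTRL[1:0] = 1 the fixed factor above is 1/3, so a 396 MHz
 * core clock yields a 132 MHz sysbus clock and, via the divide-by-two
 * below, a 66 MHz peripheral clock.
 */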

/* Peripheral Clock ***************************************************/

static struct clk __init *alchemy_clk_setup_periph(const char *pn)
{
        /* Peripheral clock runs at half the rate of sysbus clk */
        struct clk *c;

        c = clk_register_fixed_factor(NULL, ALCHEMY_PERIPH_CLK,
                                      pn, 0, 1, 2);
        if (!IS_ERR(c))
                clk_register_clkdev(c, ALCHEMY_PERIPH_CLK, NULL);
        return c;
}

/* mem clock **********************************************************/

static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct)
{
        void __iomem *addr = IOMEM(AU1000_MEM_PHYS_ADDR);
        unsigned long v;
        struct clk *c;
        int div;

        switch (ct) {
        case ALCHEMY_CPU_AU1550:
        case ALCHEMY_CPU_AU1200:
                v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
                div = (v & (1 << 15)) ? 1 : 2;
                break;
        case ALCHEMY_CPU_AU1300:
                v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
                div = (v & (1 << 31)) ? 1 : 2;
                break;
        case ALCHEMY_CPU_AU1000:
        case ALCHEMY_CPU_AU1500:
        case ALCHEMY_CPU_AU1100:
        default:
                div = 2;
                break;
        }

        c = clk_register_fixed_factor(NULL, ALCHEMY_MEM_CLK, pn,
                                      0, 1, div);
        if (!IS_ERR(c))
                clk_register_clkdev(c, ALCHEMY_MEM_CLK, NULL);
        return c;
}

/* lrclk: external synchronous static bus clock ***********************/

static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t)
{
        /* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5,
         * otherwise lrclk=pclk/4.
         * All other variants: MEM_STCFG0[15:13] = divisor.
         * L/RCLK = periph_clk / (divisor + 1)
         * On Au1000, Au1500, Au1100 it's called LCLK,
         * on later models it's called RCLK, but it's the same thing.
         */
        struct clk *c;
        unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0);

        switch (t) {
        case ALCHEMY_CPU_AU1000:
        case ALCHEMY_CPU_AU1500:
                v = 4 + ((v >> 11) & 1);
                break;
        default:        /* all other models */
                v = ((v >> 13) & 7) + 1;
        }
        c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK,
                                      pn, 0, 1, v);
        if (!IS_ERR(c))
                clk_register_clkdev(c, ALCHEMY_LR_CLK, NULL);
        return c;
}
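
/*
 * Worked example (register value assumed for illustration): on an
 * Au1550 with MEM_STCFG0[15:13] = 3 and a 66 MHz peripheral clock,
 * lrclk runs at 66 MHz / (3 + 1) = 16.5 MHz; on Au1000/Au1500 bit 11
 * of the same register merely selects between pclk/4 and pclk/5.
 */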

/* Clock dividers and muxes *******************************************/

/* data for fgen and csrc mux-dividers */
struct alchemy_fgcs_clk {
        struct clk_hw hw;
        spinlock_t *reglock;    /* register lock */
        unsigned long reg;      /* SYS_FREQCTRL0/1 */
        int shift;              /* offset in register */
        int parent;             /* parent before disable [Au1300] */
        int isen;               /* is it enabled? */
        int *dt;                /* dividertable for csrc */
};
#define to_fgcs_clk(x) container_of(x, struct alchemy_fgcs_clk, hw)

static long alchemy_calc_div(unsigned long rate, unsigned long prate,
                             int scale, int maxdiv, unsigned long *rv)
{
        long div1, div2;

        div1 = prate / rate;
        if ((prate / div1) > rate)
                div1++;

        if (scale == 2) {       /* only div-by-multiple-of-2 possible */
                if (div1 & 1)
                        div1++; /* stay <=prate */
        }

        div2 = (div1 / scale) - 1;      /* value to write to register */

        if (div2 > maxdiv)
                div2 = maxdiv;
        if (rv)
                *rv = div2;

        div1 = ((div2 + 1) * scale);
        return div1;
}
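
/*
 * Worked example (illustrative numbers): alchemy_calc_div(16000000,
 * 96000000, 2, 512, &rv) finds div1 = 96 / 16 = 6, keeps it even
 * because scale == 2, stores the register value rv = 6 / 2 - 1 = 2 and
 * returns the effective divider 6. A request of 19.2 MHz from the same
 * 96 MHz parent gives div1 = 5, which is rounded up to 6 so that the
 * resulting 16 MHz never exceeds the requested rate.
 */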

static int alchemy_clk_fgcs_detr(struct clk_hw *hw,
                                 struct clk_rate_request *req,
                                 int scale, int maxdiv)
{
        struct clk_hw *pc, *bpc, *free;
        long tdv, tpr, pr, nr, br, bpr, diff, lastdiff;
        int j;

        lastdiff = INT_MAX;
        bpr = 0;
        bpc = NULL;
        br = -EINVAL;
        free = NULL;

        /* look at the rates each enabled parent supplies and select
         * the one that gets closest to but not over the requested rate.
         */
        for (j = 0; j < 7; j++) {
                pc = clk_hw_get_parent_by_index(hw, j);
                if (!pc)
                        break;

                /* if this parent is currently unused, remember it.
                 * XXX: we would actually want clk_has_active_children()
                 * but this is a good-enough approximation for now.
                 */
                if (!clk_hw_is_prepared(pc)) {
                        if (!free)
                                free = pc;
                }

                pr = clk_hw_get_rate(pc);
                if (pr < req->rate)
                        continue;

                /* what can hardware actually provide */
                tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, NULL);
                nr = pr / tdv;
                diff = req->rate - nr;
                if (nr > req->rate)
                        continue;

                if (diff < lastdiff) {
                        lastdiff = diff;
                        bpr = pr;
                        bpc = pc;
                        br = nr;
                }
                if (diff == 0)
                        break;
        }

        /* if we couldn't get the exact rate we wanted from the enabled
         * parents, maybe we can tell an available disabled/inactive one
         * to give us a rate we can divide down to the requested rate.
         */
        if (lastdiff && free) {
                for (j = (maxdiv == 4) ? 1 : scale; j <= maxdiv; j += scale) {
                        tpr = req->rate * j;
                        if (tpr < 0)
                                break;
                        pr = clk_hw_round_rate(free, tpr);

                        tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv,
                                               NULL);
                        nr = pr / tdv;
                        diff = req->rate - nr;
                        if (nr > req->rate)
                                continue;
                        if (diff < lastdiff) {
                                lastdiff = diff;
                                bpr = pr;
                                bpc = free;
                                br = nr;
                        }
                        if (diff == 0)
                                break;
                }
        }

        if (br < 0)
                return br;

        req->best_parent_rate = bpr;
        req->best_parent_hw = bpc;
        req->rate = br;

        return 0;
}
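
/*
 * Example of the selection above (parent rates assumed purely for
 * illustration): for a 48 MHz request, an already-enabled parent at
 * 96 MHz divides by 2 and wins with an exact match. If the only
 * enabled parent ran at 90 MHz, the first loop could offer no better
 * than 45 MHz, and the second pass would ask an idle parent to round
 * to a multiple of 48 MHz that divides down exactly.
 */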

static int alchemy_clk_fgv1_en(struct clk_hw *hw)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        unsigned long v, flags;

        spin_lock_irqsave(c->reglock, flags);
        v = alchemy_rdsys(c->reg);
        v |= (1 << 1) << c->shift;
        alchemy_wrsys(v, c->reg);
        spin_unlock_irqrestore(c->reglock, flags);

        return 0;
}

static int alchemy_clk_fgv1_isen(struct clk_hw *hw)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 1);

        return v & 1;
}

static void alchemy_clk_fgv1_dis(struct clk_hw *hw)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        unsigned long v, flags;

        spin_lock_irqsave(c->reglock, flags);
        v = alchemy_rdsys(c->reg);
        v &= ~((1 << 1) << c->shift);
        alchemy_wrsys(v, c->reg);
        spin_unlock_irqrestore(c->reglock, flags);
}

static int alchemy_clk_fgv1_setp(struct clk_hw *hw, u8 index)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        unsigned long v, flags;

        spin_lock_irqsave(c->reglock, flags);
        v = alchemy_rdsys(c->reg);
        if (index)
                v |= (1 << c->shift);
        else
                v &= ~(1 << c->shift);
        alchemy_wrsys(v, c->reg);
        spin_unlock_irqrestore(c->reglock, flags);

        return 0;
}

static u8 alchemy_clk_fgv1_getp(struct clk_hw *hw)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);

        return (alchemy_rdsys(c->reg) >> c->shift) & 1;
}

static int alchemy_clk_fgv1_setr(struct clk_hw *hw, unsigned long rate,
                                 unsigned long parent_rate)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        unsigned long div, v, flags, ret;
        int sh = c->shift + 2;

        if (!rate || !parent_rate || rate > (parent_rate / 2))
                return -EINVAL;
        ret = alchemy_calc_div(rate, parent_rate, 2, 512, &div);
        spin_lock_irqsave(c->reglock, flags);
        v = alchemy_rdsys(c->reg);
        v &= ~(0xff << sh);
        v |= div << sh;
        alchemy_wrsys(v, c->reg);
        spin_unlock_irqrestore(c->reglock, flags);

        return 0;
}

static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw,
                                             unsigned long parent_rate)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 2);

        v = ((v & 0xff) + 1) * 2;
        return parent_rate / v;
}

static int alchemy_clk_fgv1_detr(struct clk_hw *hw,
                                 struct clk_rate_request *req)
{
        return alchemy_clk_fgcs_detr(hw, req, 2, 512);
}

/* Au1000, Au1100, Au15x0, Au12x0 */
static const struct clk_ops alchemy_clkops_fgenv1 = {
        .recalc_rate = alchemy_clk_fgv1_recalc,
        .determine_rate = alchemy_clk_fgv1_detr,
        .set_rate = alchemy_clk_fgv1_setr,
        .set_parent = alchemy_clk_fgv1_setp,
        .get_parent = alchemy_clk_fgv1_getp,
        .enable = alchemy_clk_fgv1_en,
        .disable = alchemy_clk_fgv1_dis,
        .is_enabled = alchemy_clk_fgv1_isen,
};
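
/*
 * For reference, the register layout assumed by the v1 ops above: each
 * frequency generator owns a 10-bit field in SYS_FREQCTRL0/1 starting
 * at c->shift. The lowest bit of the field selects the parent PLL
 * (CPU or AUX), the next bit gates the output, and the upper eight
 * bits hold the divider, giving parent_rate / ((divider + 1) * 2).
 */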

static void __alchemy_clk_fgv2_en(struct alchemy_fgcs_clk *c)
{
        unsigned long v = alchemy_rdsys(c->reg);

        v &= ~(3 << c->shift);
        v |= (c->parent & 3) << c->shift;
        alchemy_wrsys(v, c->reg);
        c->isen = 1;
}

static int alchemy_clk_fgv2_en(struct clk_hw *hw)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        unsigned long flags;

        /* enable by setting the previous parent clock */
        spin_lock_irqsave(c->reglock, flags);
        __alchemy_clk_fgv2_en(c);
        spin_unlock_irqrestore(c->reglock, flags);

        return 0;
}

static int alchemy_clk_fgv2_isen(struct clk_hw *hw)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);

        return ((alchemy_rdsys(c->reg) >> c->shift) & 3) != 0;
}

static void alchemy_clk_fgv2_dis(struct clk_hw *hw)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        unsigned long v, flags;

        spin_lock_irqsave(c->reglock, flags);
        v = alchemy_rdsys(c->reg);
        v &= ~(3 << c->shift);  /* set input mux to "disabled" state */
        alchemy_wrsys(v, c->reg);
        c->isen = 0;
        spin_unlock_irqrestore(c->reglock, flags);
}

static int alchemy_clk_fgv2_setp(struct clk_hw *hw, u8 index)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        unsigned long flags;

        spin_lock_irqsave(c->reglock, flags);
        c->parent = index + 1;  /* value to write to register */
        if (c->isen)
                __alchemy_clk_fgv2_en(c);
        spin_unlock_irqrestore(c->reglock, flags);

        return 0;
}

static u8 alchemy_clk_fgv2_getp(struct clk_hw *hw)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        unsigned long flags, v;

        spin_lock_irqsave(c->reglock, flags);
        v = c->parent - 1;
        spin_unlock_irqrestore(c->reglock, flags);
        return v;
}

/* fg0-2 and fg4-6 share a "scale"-bit. With this bit cleared, the
 * dividers behave exactly as on previous models (dividers are multiples
 * of 2); with the bit set, dividers are multiples of 1, halving their
 * range, but making them also much more flexible.
 */
static int alchemy_clk_fgv2_setr(struct clk_hw *hw, unsigned long rate,
                                 unsigned long parent_rate)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        int sh = c->shift + 2;
        unsigned long div, v, flags, ret;

        if (!rate || !parent_rate || rate > parent_rate)
                return -EINVAL;

        v = alchemy_rdsys(c->reg) & (1 << 30);  /* test "scale" bit */
        ret = alchemy_calc_div(rate, parent_rate, v ? 1 : 2,
                               v ? 256 : 512, &div);

        spin_lock_irqsave(c->reglock, flags);
        v = alchemy_rdsys(c->reg);
        v &= ~(0xff << sh);
        v |= (div & 0xff) << sh;
        alchemy_wrsys(v, c->reg);
        spin_unlock_irqrestore(c->reglock, flags);

        return 0;
}

static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw,
                                             unsigned long parent_rate)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        int sh = c->shift + 2;
        unsigned long v, t;

        v = alchemy_rdsys(c->reg);
        t = parent_rate / (((v >> sh) & 0xff) + 1);
        if ((v & (1 << 30)) == 0)       /* test scale bit */
                t /= 2;

        return t;
}
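
/*
 * Worked example of the scale bit (values assumed for illustration):
 * with a divider field of 3 and bit 30 clear, the generator behaves as
 * on the older parts and divides its parent by (3 + 1) * 2 = 8; with
 * the bit set the same field divides by only 3 + 1 = 4, trading range
 * (maximum 256 instead of 512) for finer steps.
 */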

static int alchemy_clk_fgv2_detr(struct clk_hw *hw,
                                 struct clk_rate_request *req)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        int scale, maxdiv;

        if (alchemy_rdsys(c->reg) & (1 << 30)) {
                scale = 1;
                maxdiv = 256;
        } else {
                scale = 2;
                maxdiv = 512;
        }

        return alchemy_clk_fgcs_detr(hw, req, scale, maxdiv);
}

/* Au1300 larger input mux, no separate disable bit, flexible divider */
static const struct clk_ops alchemy_clkops_fgenv2 = {
        .recalc_rate = alchemy_clk_fgv2_recalc,
        .determine_rate = alchemy_clk_fgv2_detr,
        .set_rate = alchemy_clk_fgv2_setr,
        .set_parent = alchemy_clk_fgv2_setp,
        .get_parent = alchemy_clk_fgv2_getp,
        .enable = alchemy_clk_fgv2_en,
        .disable = alchemy_clk_fgv2_dis,
        .is_enabled = alchemy_clk_fgv2_isen,
};

static const char * const alchemy_clk_fgv1_parents[] = {
        ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
};

static const char * const alchemy_clk_fgv2_parents[] = {
        ALCHEMY_AUXPLL2_CLK, ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
};

static const char * const alchemy_clk_fgen_names[] = {
        ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
        ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK };

static int __init alchemy_clk_init_fgens(int ctype)
{
        struct clk *c;
        struct clk_init_data id;
        struct alchemy_fgcs_clk *a;
        unsigned long v;
        int i, ret;

        switch (ctype) {
        case ALCHEMY_CPU_AU1000...ALCHEMY_CPU_AU1200:
                id.ops = &alchemy_clkops_fgenv1;
                id.parent_names = alchemy_clk_fgv1_parents;
                id.num_parents = 2;
                break;
        case ALCHEMY_CPU_AU1300:
                id.ops = &alchemy_clkops_fgenv2;
                id.parent_names = alchemy_clk_fgv2_parents;
                id.num_parents = 3;
                break;
        default:
                return -ENODEV;
        }
        id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;

        a = kcalloc(6, sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        spin_lock_init(&alchemy_clk_fg0_lock);
        spin_lock_init(&alchemy_clk_fg1_lock);
        ret = 0;
        for (i = 0; i < 6; i++) {
                id.name = alchemy_clk_fgen_names[i];
                a->shift = 10 * (i < 3 ? i : i - 3);
                if (i > 2) {
                        a->reg = AU1000_SYS_FREQCTRL1;
                        a->reglock = &alchemy_clk_fg1_lock;
                } else {
                        a->reg = AU1000_SYS_FREQCTRL0;
                        a->reglock = &alchemy_clk_fg0_lock;
                }

                /* default to first parent if bootloader has set
                 * the mux to disabled state.
                 */
                if (ctype == ALCHEMY_CPU_AU1300) {
                        v = alchemy_rdsys(a->reg);
                        a->parent = (v >> a->shift) & 3;
                        if (!a->parent) {
                                a->parent = 1;
                                a->isen = 0;
                        } else
                                a->isen = 1;
                }

                a->hw.init = &id;
                c = clk_register(NULL, &a->hw);
                if (IS_ERR(c))
                        ret++;
                else
                        clk_register_clkdev(c, id.name, NULL);
                a++;
        }

        return ret;
}

/* internal sources muxes *********************************************/

static int alchemy_clk_csrc_isen(struct clk_hw *hw)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        unsigned long v = alchemy_rdsys(c->reg);

        return (((v >> c->shift) >> 2) & 7) != 0;
}

static void __alchemy_clk_csrc_en(struct alchemy_fgcs_clk *c)
{
        unsigned long v = alchemy_rdsys(c->reg);

        v &= ~((7 << 2) << c->shift);
        v |= ((c->parent & 7) << 2) << c->shift;
        alchemy_wrsys(v, c->reg);
        c->isen = 1;
}

static int alchemy_clk_csrc_en(struct clk_hw *hw)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        unsigned long flags;

        /* enable by setting the previous parent clock */
        spin_lock_irqsave(c->reglock, flags);
        __alchemy_clk_csrc_en(c);
        spin_unlock_irqrestore(c->reglock, flags);

        return 0;
}

static void alchemy_clk_csrc_dis(struct clk_hw *hw)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        unsigned long v, flags;

        spin_lock_irqsave(c->reglock, flags);
        v = alchemy_rdsys(c->reg);
        v &= ~((3 << 2) << c->shift);   /* mux to "disabled" state */
        alchemy_wrsys(v, c->reg);
        c->isen = 0;
        spin_unlock_irqrestore(c->reglock, flags);
}

static int alchemy_clk_csrc_setp(struct clk_hw *hw, u8 index)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        unsigned long flags;

        spin_lock_irqsave(c->reglock, flags);
        c->parent = index + 1;  /* value to write to register */
        if (c->isen)
                __alchemy_clk_csrc_en(c);
        spin_unlock_irqrestore(c->reglock, flags);

        return 0;
}

static u8 alchemy_clk_csrc_getp(struct clk_hw *hw)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);

        return c->parent - 1;
}

static unsigned long alchemy_clk_csrc_recalc(struct clk_hw *hw,
                                             unsigned long parent_rate)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        unsigned long v = (alchemy_rdsys(c->reg) >> c->shift) & 3;

        return parent_rate / c->dt[v];
}

static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate,
                                 unsigned long parent_rate)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        unsigned long d, v, flags;
        int i;

        if (!rate || !parent_rate || rate > parent_rate)
                return -EINVAL;

        d = (parent_rate + (rate / 2)) / rate;
        if (d > 4)
                return -EINVAL;
        if ((d == 3) && (c->dt[2] != 3))
                d = 4;

        for (i = 0; i < 4; i++)
                if (c->dt[i] == d)
                        break;

        if (i >= 4)
                return -EINVAL; /* oops */

        spin_lock_irqsave(c->reglock, flags);
        v = alchemy_rdsys(c->reg);
        v &= ~(3 << c->shift);
        v |= (i & 3) << c->shift;
        alchemy_wrsys(v, c->reg);
        spin_unlock_irqrestore(c->reglock, flags);

        return 0;
}

static int alchemy_clk_csrc_detr(struct clk_hw *hw,
                                 struct clk_rate_request *req)
{
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        int scale = c->dt[2] == 3 ? 1 : 2;      /* au1300 check */

        return alchemy_clk_fgcs_detr(hw, req, scale, 4);
}

static const struct clk_ops alchemy_clkops_csrc = {
        .recalc_rate = alchemy_clk_csrc_recalc,
        .determine_rate = alchemy_clk_csrc_detr,
        .set_rate = alchemy_clk_csrc_setr,
        .set_parent = alchemy_clk_csrc_setp,
        .get_parent = alchemy_clk_csrc_getp,
        .enable = alchemy_clk_csrc_en,
        .disable = alchemy_clk_csrc_dis,
        .is_enabled = alchemy_clk_csrc_isen,
};

static const char * const alchemy_clk_csrc_parents[] = {
        /* disabled at index 0 */ ALCHEMY_AUXPLL_CLK,
        ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
        ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK
};

/* divider tables */
static int alchemy_csrc_dt1[] = { 1, 4, 1, 2 };  /* rest */
static int alchemy_csrc_dt2[] = { 1, 4, 3, 2 };  /* Au1300 */
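
/*
 * Worked example for the tables above (rates assumed for
 * illustration): with a 48 MHz parent, a 12 MHz request makes
 * alchemy_clk_csrc_setr() compute d = 4 and write index 1. A 16 MHz
 * request gives d = 3, which only exists in the Au1300 table; on all
 * other parts it is bumped to d = 4, so the consumer ends up with
 * 12 MHz instead.
 */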

static int __init alchemy_clk_setup_imux(int ctype)
{
        struct alchemy_fgcs_clk *a;
        const char * const *names;
        struct clk_init_data id;
        unsigned long v;
        int i, ret, *dt;
        struct clk *c;

        id.ops = &alchemy_clkops_csrc;
        id.parent_names = alchemy_clk_csrc_parents;
        id.num_parents = 7;
        id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;

        dt = alchemy_csrc_dt1;
        switch (ctype) {
        case ALCHEMY_CPU_AU1000:
                names = alchemy_au1000_intclknames;
                break;
        case ALCHEMY_CPU_AU1500:
                names = alchemy_au1500_intclknames;
                break;
        case ALCHEMY_CPU_AU1100:
                names = alchemy_au1100_intclknames;
                break;
        case ALCHEMY_CPU_AU1550:
                names = alchemy_au1550_intclknames;
                break;
        case ALCHEMY_CPU_AU1200:
                names = alchemy_au1200_intclknames;
                break;
        case ALCHEMY_CPU_AU1300:
                dt = alchemy_csrc_dt2;
                names = alchemy_au1300_intclknames;
                break;
        default:
                return -ENODEV;
        }

        a = kcalloc(6, sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        ret = 0;

        for (i = 0; i < 6; i++) {
                id.name = names[i];
                if (!id.name)
                        goto next;

                a->shift = i * 5;
                a->reg = AU1000_SYS_CLKSRC;
                a->reglock = &alchemy_clk_csrc_lock;
                a->dt = dt;

                /* default to first parent clock if mux is initially
                 * set to disabled state.
                 */
                v = alchemy_rdsys(a->reg);
                a->parent = ((v >> a->shift) >> 2) & 7;
                if (!a->parent) {
                        a->parent = 1;
                        a->isen = 0;
                } else
                        a->isen = 1;

                a->hw.init = &id;
                c = clk_register(NULL, &a->hw);
                if (IS_ERR(c))
                        ret++;
                else
                        clk_register_clkdev(c, id.name, NULL);
next:
                a++;
        }

        return ret;
}


/**********************************************************************/


#define ERRCK(x)                                                \
        if (IS_ERR(x)) {                                        \
                ret = PTR_ERR(x);                               \
                goto out;                                       \
        }

static int __init alchemy_clk_init(void)
{
        int ctype = alchemy_get_cputype(), ret, i;
        struct clk_aliastable *t = alchemy_clk_aliases;
        struct clk *c;

        /* Root of the Alchemy clock tree: external 12MHz crystal osc */
        c = clk_register_fixed_rate(NULL, ALCHEMY_ROOT_CLK, NULL,
                                    0, ALCHEMY_ROOTCLK_RATE);
        ERRCK(c)

        /* CPU core clock */
        c = alchemy_clk_setup_cpu(ALCHEMY_ROOT_CLK, ctype);
        ERRCK(c)

        /* AUXPLLs: max 1GHz on Au1300, 748MHz on older models */
        i = (ctype == ALCHEMY_CPU_AU1300) ? 84 : 63;
        c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK, ALCHEMY_AUXPLL_CLK,
                                  i, AU1000_SYS_AUXPLL);
        ERRCK(c)

        if (ctype == ALCHEMY_CPU_AU1300) {
                c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK,
                                          ALCHEMY_AUXPLL2_CLK, i,
                                          AU1300_SYS_AUXPLL2);
                ERRCK(c)
        }

        /* sysbus clock: cpu core clock divided by 2, 3 or 4 */
        c = alchemy_clk_setup_sysbus(ALCHEMY_CPU_CLK);
        ERRCK(c)

        /* peripheral clock: runs at half rate of sysbus clk */
        c = alchemy_clk_setup_periph(ALCHEMY_SYSBUS_CLK);
        ERRCK(c)

        /* SDR/DDR memory clock */
        c = alchemy_clk_setup_mem(ALCHEMY_SYSBUS_CLK, ctype);
        ERRCK(c)

        /* L/RCLK: external static bus clock for synchronous mode */
        c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype);
        ERRCK(c)

        /* Frequency dividers 0-5 */
        ret = alchemy_clk_init_fgens(ctype);
        if (ret) {
                ret = -ENODEV;
                goto out;
        }
        /* divider muxes for internal sources */
        ret = alchemy_clk_setup_imux(ctype);
        if (ret) {
                ret = -ENODEV;
                goto out;
        }

        /* set up aliases drivers might look for */
        while (t->base) {
                if (t->cputype == ctype)
                        clk_add_alias(t->alias, NULL, t->base, NULL);
                t++;
        }

        pr_info("Alchemy clocktree installed\n");
        return 0;

out:
        return ret;
}
postcore_initcall(alchemy_clk_init);
