// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Ingenic SoC CGU driver
 *
 * Copyright (c) 2013-2015 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time.h>

#include "cgu.h"

#define MHZ (1000 * 1000)

static inline const struct ingenic_cgu_clk_info *
to_clk_info(struct ingenic_clk *clk)
{
	return &clk->cgu->clock_info[clk->idx];
}

/**
 * ingenic_cgu_gate_get() - get the value of clock gate register bit
 * @cgu: reference to the CGU whose registers should be read
 * @info: info struct describing the gate bit
 *
 * Retrieves the state of the clock gate bit described by info. The
 * caller must hold cgu->lock.
 *
 * Return: true if the clock is gated, else false.
 */
static inline bool
ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
		     const struct ingenic_cgu_gate_info *info)
{
	return !!(readl(cgu->base + info->reg) & BIT(info->bit))
		^ info->clear_to_gate;
}

/**
 * ingenic_cgu_gate_set() - set the value of clock gate register bit
 * @cgu: reference to the CGU whose registers should be modified
 * @info: info struct describing the gate bit
 * @val: true to gate the clock, false to ungate it
 *
 * Sets the given gate bit in order to gate or ungate a clock.
 *
 * The caller must hold cgu->lock.
 */
static inline void
ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
		     const struct ingenic_cgu_gate_info *info, bool val)
{
	u32 clkgr = readl(cgu->base + info->reg);

	if (val ^ info->clear_to_gate)
		clkgr |= BIT(info->bit);
	else
		clkgr &= ~BIT(info->bit);

	writel(clkgr, cgu->base + info->reg);
}
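
/*
 * Both helpers above normalize gate polarity: most gate bits stop the
 * clock when set, but clocks whose bit must be cleared to stop them set
 * ->clear_to_gate, and the XOR makes "true" consistently mean "gated".
 */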

/*
 * PLL operations
 */

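/*
 * The PLL output is parent_rate * m * rate_multiplier / (n * od).  As an
 * illustrative example (values not taken from any particular SoC): a
 * 24 MHz parent with m = 125, n = 3, od = 1 and rate_multiplier = 1
 * yields 24 MHz * 125 / (3 * 1) = 1 GHz.
 */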
static unsigned long
ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od, od_enc = 0;
	bool bypass;
	u32 ctl;

	BUG_ON(clk_info->type != CGU_CLK_PLL);
	pll_info = &clk_info->pll;

	ctl = readl(cgu->base + pll_info->reg);

	m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
	m += pll_info->m_offset;
	n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
	n += pll_info->n_offset;

	if (pll_info->od_bits > 0) {
		od_enc = ctl >> pll_info->od_shift;
		od_enc &= GENMASK(pll_info->od_bits - 1, 0);
	}

	if (pll_info->bypass_bit >= 0) {
		ctl = readl(cgu->base + pll_info->bypass_reg);

		bypass = !!(ctl & BIT(pll_info->bypass_bit));

		if (bypass)
			return parent_rate;
	}

	for (od = 0; od < pll_info->od_max; od++)
		if (pll_info->od_encoding[od] == od_enc)
			break;

	/* if od_max = 0, od_bits should be 0 and od is fixed to 1. */
	if (pll_info->od_max == 0)
		BUG_ON(pll_info->od_bits != 0);
	else
		BUG_ON(od == pll_info->od_max);
	od++;

	return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
		       n * od);
}

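/*
 * Default m/n/od computation, used by ingenic_pll_calc() when the SoC's
 * pll_info does not supply its own ->calc_m_n_od() callback.  Worked
 * example (illustrative): for a 24 MHz parent and a 996 MHz target,
 * n = 24 / 10 = 2, od = 1 and m = (996 * 1 * 2) / 24 = 83, giving
 * 24 MHz * 83 / (2 * 1) = 996 MHz.
 */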
static void
ingenic_pll_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
			unsigned long rate, unsigned long parent_rate,
			unsigned int *pm, unsigned int *pn, unsigned int *pod)
{
	unsigned int m, n, od = 1;

	/*
	 * The frequency after the input divider must be between 10 and 50 MHz.
	 * The highest divider yields the best resolution.
	 */
	n = parent_rate / (10 * MHZ);
	n = min_t(unsigned int, n, 1 << pll_info->n_bits);
	n = max_t(unsigned int, n, pll_info->n_offset);

	m = (rate / MHZ) * od * n / (parent_rate / MHZ);
	m = min_t(unsigned int, m, 1 << pll_info->m_bits);
	m = max_t(unsigned int, m, pll_info->m_offset);

	*pm = m;
	*pn = n;
	*pod = od;
}

static unsigned long
ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
		 unsigned long rate, unsigned long parent_rate,
		 unsigned int *pm, unsigned int *pn, unsigned int *pod)
{
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned int m, n, od;

	if (pll_info->calc_m_n_od)
		(*pll_info->calc_m_n_od)(pll_info, rate, parent_rate, &m, &n, &od);
	else
		ingenic_pll_calc_m_n_od(pll_info, rate, parent_rate, &m, &n, &od);

	if (pm)
		*pm = m;
	if (pn)
		*pn = n;
	if (pod)
		*pod = od;

	return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
		       n * od);
}

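/*
 * clk framework ->determine_rate() callback: report the rate the PLL
 * would actually achieve for the request, without touching the hardware.
 */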
static int ingenic_pll_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);

	req->rate = ingenic_pll_calc(clk_info, req->rate, req->best_parent_rate,
				     NULL, NULL, NULL);

	return 0;
}

static inline int ingenic_pll_check_stable(struct ingenic_cgu *cgu,
					   const struct ingenic_cgu_pll_info *pll_info)
{
	u32 ctl;

	if (pll_info->stable_bit < 0)
		return 0;

	return readl_poll_timeout(cgu->base + pll_info->reg, ctl,
				  ctl & BIT(pll_info->stable_bit),
				  0, 100 * USEC_PER_MSEC);
}

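/*
 * Reprogram m, n and od.  If the PLL is currently enabled we must wait
 * for it to re-lock before returning; if it is disabled, the stability
 * wait happens in ingenic_pll_enable() instead.
 */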
static int
ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long rate, flags;
	unsigned int m, n, od;
	int ret = 0;
	u32 ctl;

	rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
				&m, &n, &od);
	if (rate != req_rate)
		pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
			clk_info->name, req_rate, rate);

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
	ctl |= (m - pll_info->m_offset) << pll_info->m_shift;

	ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
	ctl |= (n - pll_info->n_offset) << pll_info->n_shift;

	if (pll_info->od_bits > 0) {
		ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
		ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;
	}

	writel(ctl, cgu->base + pll_info->reg);

	if (pll_info->set_rate_hook)
		pll_info->set_rate_hook(pll_info, rate, parent_rate);

	/* If the PLL is enabled, verify that it's stable */
	if (pll_info->enable_bit >= 0 && (ctl & BIT(pll_info->enable_bit)))
		ret = ingenic_pll_check_stable(cgu, pll_info);

	spin_unlock_irqrestore(&cgu->lock, flags);

	return ret;
}

static int ingenic_pll_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long flags;
	int ret;
	u32 ctl;

	if (pll_info->enable_bit < 0)
		return 0;

	spin_lock_irqsave(&cgu->lock, flags);
	if (pll_info->bypass_bit >= 0) {
		ctl = readl(cgu->base + pll_info->bypass_reg);

		ctl &= ~BIT(pll_info->bypass_bit);

		writel(ctl, cgu->base + pll_info->bypass_reg);
	}

	ctl = readl(cgu->base + pll_info->reg);

	ctl |= BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);

	ret = ingenic_pll_check_stable(cgu, pll_info);
	spin_unlock_irqrestore(&cgu->lock, flags);

	return ret;
}

static void ingenic_pll_disable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long flags;
	u32 ctl;

	if (pll_info->enable_bit < 0)
		return;

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	ctl &= ~BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);
	spin_unlock_irqrestore(&cgu->lock, flags);
}

static int ingenic_pll_is_enabled(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	u32 ctl;

	if (pll_info->enable_bit < 0)
		return true;

	ctl = readl(cgu->base + pll_info->reg);

	return !!(ctl & BIT(pll_info->enable_bit));
}

static const struct clk_ops ingenic_pll_ops = {
	.recalc_rate = ingenic_pll_recalc_rate,
	.determine_rate = ingenic_pll_determine_rate,
	.set_rate = ingenic_pll_set_rate,

	.enable = ingenic_pll_enable,
	.disable = ingenic_pll_disable,
	.is_enabled = ingenic_pll_is_enabled,
};

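/*
 * Minimal sketch of a PLL entry in a SoC's clock_info[] table, showing
 * how the pll_info fields used above fit together.  All names and values
 * here are hypothetical; the real tables live in the per-SoC drivers
 * (e.g. jz4740-cgu.c):
 *
 *	[CLK_PLL] = {
 *		.name = "pll", .type = CGU_CLK_PLL,
 *		.parents = { CLK_EXT, -1, -1, -1 },
 *		.pll = {
 *			.reg = 0x10,
 *			.rate_multiplier = 1,
 *			.m_shift = 23, .m_bits = 9, .m_offset = 2,
 *			.n_shift = 18, .n_bits = 5, .n_offset = 2,
 *			.od_shift = 16, .od_bits = 2, .od_max = 4,
 *			.od_encoding = pll_od_encoding,
 *			.bypass_reg = 0x10, .bypass_bit = 9,
 *			.enable_bit = 8,
 *			.stable_bit = 10,
 *		},
 *	},
 */
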
/*
 * Operations for all non-PLL clocks
 */

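/*
 * clk_info->parents[] is indexed by the hardware mux value and may
 * contain -1 for mux inputs that are not connected.  For example
 * (illustrative), with parents = { EXT, -1, PLL, -1 }, hardware value 2
 * maps to clk framework parent index 1, and vice versa.
 */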
static u8 ingenic_clk_get_parent(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	u32 reg;
	u8 i, hw_idx, idx = 0;

	if (clk_info->type & CGU_CLK_MUX) {
		reg = readl(cgu->base + clk_info->mux.reg);
		hw_idx = (reg >> clk_info->mux.shift) &
			 GENMASK(clk_info->mux.bits - 1, 0);

		/*
		 * Convert the hardware index to the parent index by skipping
		 * over any -1's in the parents array.
		 */
		for (i = 0; i < hw_idx; i++) {
			if (clk_info->parents[i] != -1)
				idx++;
		}
	}

	return idx;
}

static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long flags;
	u8 curr_idx, hw_idx, num_poss;
	u32 reg, mask;

	if (clk_info->type & CGU_CLK_MUX) {
		/*
		 * Convert the parent index to the hardware index by adding
		 * 1 for any -1 in the parents array preceding the given
		 * index. That is, we want the index of the idx'th entry in
		 * clk_info->parents which does not equal -1.
		 */
		hw_idx = curr_idx = 0;
		num_poss = 1 << clk_info->mux.bits;
		for (; hw_idx < num_poss; hw_idx++) {
			if (clk_info->parents[hw_idx] == -1)
				continue;
			if (curr_idx == idx)
				break;
			curr_idx++;
		}

		/* idx should always be a valid parent */
		BUG_ON(curr_idx != idx);

		mask = GENMASK(clk_info->mux.bits - 1, 0);
		mask <<= clk_info->mux.shift;

		spin_lock_irqsave(&cgu->lock, flags);

		/* write the register */
		reg = readl(cgu->base + clk_info->mux.reg);
		reg &= ~mask;
		reg |= hw_idx << clk_info->mux.shift;
		writel(reg, cgu->base + clk_info->mux.reg);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return 0;
	}

	return idx ? -EINVAL : 0;
}

static unsigned long
ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long rate = parent_rate;
	u32 div_reg, div;
	u8 parent;

	if (clk_info->type & CGU_CLK_DIV) {
		parent = ingenic_clk_get_parent(hw);

		if (!(clk_info->div.bypass_mask & BIT(parent))) {
			div_reg = readl(cgu->base + clk_info->div.reg);
			div = (div_reg >> clk_info->div.shift) &
			      GENMASK(clk_info->div.bits - 1, 0);

			if (clk_info->div.div_table)
				div = clk_info->div.div_table[div];
			else
				div = (div + 1) * clk_info->div.div;

			rate /= div;
		}
	} else if (clk_info->type & CGU_CLK_FIXDIV) {
		rate /= clk_info->fixdiv.div;
	}

	return rate;
}

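/*
 * Select the index of the smallest div_table entry that is not below the
 * requested divider, falling back to index 0 if no entry is large enough.
 */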
static unsigned int
ingenic_clk_calc_hw_div(const struct ingenic_cgu_clk_info *clk_info,
			unsigned int div)
{
	unsigned int i, best_i = 0, best = (unsigned int)-1;

	for (i = 0; i < (1 << clk_info->div.bits)
				&& clk_info->div.div_table[i]; i++) {
		if (clk_info->div.div_table[i] >= div &&
		    clk_info->div.div_table[i] < best) {
			best = clk_info->div.div_table[i];
			best_i = i;

			if (div == best)
				break;
		}
	}

	return best_i;
}

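/*
 * Illustrative example of the clamping below (hypothetical values): with
 * div.div = 2 and div.bits = 4 the divider is confined to [2, 32], and
 * rounding it up to a multiple of div.div ensures no bits are lost when
 * the value is divided by div.div on its way into the register.
 */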
static unsigned
ingenic_clk_calc_div(struct clk_hw *hw,
		     const struct ingenic_cgu_clk_info *clk_info,
		     unsigned long parent_rate, unsigned long req_rate)
{
	unsigned int div, hw_div;
	u8 parent;

	parent = ingenic_clk_get_parent(hw);
	if (clk_info->div.bypass_mask & BIT(parent))
		return 1;

	/* calculate the divide */
	div = DIV_ROUND_UP(parent_rate, req_rate);

	if (clk_info->div.div_table) {
		hw_div = ingenic_clk_calc_hw_div(clk_info, div);

		return clk_info->div.div_table[hw_div];
	}

	/* Impose hardware constraints */
	div = clamp_t(unsigned int, div, clk_info->div.div,
		      clk_info->div.div << clk_info->div.bits);

	/*
	 * If the divider value itself must be divided before being written to
	 * the divider register, we must ensure we don't have any bits set that
	 * would be lost as a result of doing so.
	 */
	div = DIV_ROUND_UP(div, clk_info->div.div);
	div *= clk_info->div.div;

	return div;
}

static int ingenic_clk_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	unsigned int div = 1;

	if (clk_info->type & CGU_CLK_DIV)
		div = ingenic_clk_calc_div(hw, clk_info, req->best_parent_rate,
					   req->rate);
	else if (clk_info->type & CGU_CLK_FIXDIV)
		div = clk_info->fixdiv.div;
	else if (clk_hw_can_set_rate_parent(hw))
		req->best_parent_rate = req->rate;

	req->rate = DIV_ROUND_UP(req->best_parent_rate, div);
	return 0;
}

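/*
 * Poll the divider's busy bit until the rate change takes effect.  Only
 * called when clk_info->div.busy_bit is valid (!= -1).
 */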
static inline int ingenic_clk_check_stable(struct ingenic_cgu *cgu,
					   const struct ingenic_cgu_clk_info *clk_info)
{
	u32 reg;

	return readl_poll_timeout(cgu->base + clk_info->div.reg, reg,
				  !(reg & BIT(clk_info->div.busy_bit)),
				  0, 100 * USEC_PER_MSEC);
}

static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long rate, flags;
	unsigned int hw_div, div;
	u32 reg, mask;
	int ret = 0;

	if (clk_info->type & CGU_CLK_DIV) {
		div = ingenic_clk_calc_div(hw, clk_info, parent_rate, req_rate);
		rate = DIV_ROUND_UP(parent_rate, div);

		if (rate != req_rate)
			return -EINVAL;

		if (clk_info->div.div_table)
			hw_div = ingenic_clk_calc_hw_div(clk_info, div);
		else
			hw_div = ((div / clk_info->div.div) - 1);

		spin_lock_irqsave(&cgu->lock, flags);
		reg = readl(cgu->base + clk_info->div.reg);

		/* update the divide */
		mask = GENMASK(clk_info->div.bits - 1, 0);
		reg &= ~(mask << clk_info->div.shift);
		reg |= hw_div << clk_info->div.shift;

		/* clear the stop bit */
		if (clk_info->div.stop_bit != -1)
			reg &= ~BIT(clk_info->div.stop_bit);

		/* set the change enable bit */
		if (clk_info->div.ce_bit != -1)
			reg |= BIT(clk_info->div.ce_bit);

		/* update the hardware */
		writel(reg, cgu->base + clk_info->div.reg);

		/* wait for the change to take effect */
		if (clk_info->div.busy_bit != -1)
			ret = ingenic_clk_check_stable(cgu, clk_info);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return ret;
	}

	return -EINVAL;
}

static int ingenic_clk_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long flags;

	if (clk_info->type & CGU_CLK_GATE) {
		/* ungate the clock */
		spin_lock_irqsave(&cgu->lock, flags);
		ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
		spin_unlock_irqrestore(&cgu->lock, flags);

		if (clk_info->gate.delay_us)
			udelay(clk_info->gate.delay_us);
	}

	return 0;
}

static void ingenic_clk_disable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long flags;

	if (clk_info->type & CGU_CLK_GATE) {
		/* gate the clock */
		spin_lock_irqsave(&cgu->lock, flags);
		ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
		spin_unlock_irqrestore(&cgu->lock, flags);
	}
}

static int ingenic_clk_is_enabled(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	int enabled = 1;

	if (clk_info->type & CGU_CLK_GATE)
		enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);

	return enabled;
}

static const struct clk_ops ingenic_clk_ops = {
	.get_parent = ingenic_clk_get_parent,
	.set_parent = ingenic_clk_set_parent,

	.recalc_rate = ingenic_clk_recalc_rate,
	.determine_rate = ingenic_clk_determine_rate,
	.set_rate = ingenic_clk_set_rate,

	.enable = ingenic_clk_enable,
	.disable = ingenic_clk_disable,
	.is_enabled = ingenic_clk_is_enabled,
};

/*
 * Setup functions.
 */

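/*
 * Register the clock at index idx.  Parent clocks are looked up in
 * cgu->clocks.clks[], so every clock's parents must appear earlier in
 * the clock_info table than the clock itself.
 */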
static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
{
	const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
	struct clk_init_data clk_init;
	struct ingenic_clk *ingenic_clk = NULL;
	struct clk *clk, *parent;
	const char *parent_names[4];
	unsigned caps, i, num_possible;
	int err = -EINVAL;

	BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));

	if (clk_info->type == CGU_CLK_EXT) {
		clk = of_clk_get_by_name(cgu->np, clk_info->name);
		if (IS_ERR(clk)) {
			pr_err("%s: no external clock '%s' provided\n",
			       __func__, clk_info->name);
			err = -ENODEV;
			goto out;
		}
		err = clk_register_clkdev(clk, clk_info->name, NULL);
		if (err) {
			clk_put(clk);
			goto out;
		}
		cgu->clocks.clks[idx] = clk;
		return 0;
	}

	if (!clk_info->type) {
		pr_err("%s: no clock type specified for '%s'\n", __func__,
		       clk_info->name);
		goto out;
	}

	ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
	if (!ingenic_clk) {
		err = -ENOMEM;
		goto out;
	}

	ingenic_clk->hw.init = &clk_init;
	ingenic_clk->cgu = cgu;
	ingenic_clk->idx = idx;

	clk_init.name = clk_info->name;
	clk_init.flags = clk_info->flags;
	clk_init.parent_names = parent_names;

	caps = clk_info->type;

	if (caps & CGU_CLK_DIV) {
		caps &= ~CGU_CLK_DIV;
	} else if (!(caps & CGU_CLK_CUSTOM)) {
		/* pass rate changes to the parent clock */
		clk_init.flags |= CLK_SET_RATE_PARENT;
	}

	if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
		clk_init.num_parents = 0;

		if (caps & CGU_CLK_MUX)
			num_possible = 1 << clk_info->mux.bits;
		else
			num_possible = ARRAY_SIZE(clk_info->parents);

		for (i = 0; i < num_possible; i++) {
			if (clk_info->parents[i] == -1)
				continue;

			parent = cgu->clocks.clks[clk_info->parents[i]];
			parent_names[clk_init.num_parents] =
				__clk_get_name(parent);
			clk_init.num_parents++;
		}

		BUG_ON(!clk_init.num_parents);
		BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
	} else {
		BUG_ON(clk_info->parents[0] == -1);
		clk_init.num_parents = 1;
		parent = cgu->clocks.clks[clk_info->parents[0]];
		parent_names[0] = __clk_get_name(parent);
	}

	if (caps & CGU_CLK_CUSTOM) {
		clk_init.ops = clk_info->custom.clk_ops;

		caps &= ~CGU_CLK_CUSTOM;

		if (caps) {
			pr_err("%s: custom clock may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else if (caps & CGU_CLK_PLL) {
		clk_init.ops = &ingenic_pll_ops;

		caps &= ~CGU_CLK_PLL;

		if (caps) {
			pr_err("%s: PLL may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else {
		clk_init.ops = &ingenic_clk_ops;
	}

	/* nothing to do for gates or fixed dividers */
	caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);

	if (caps & CGU_CLK_MUX) {
		if (!(caps & CGU_CLK_MUX_GLITCHFREE))
			clk_init.flags |= CLK_SET_PARENT_GATE;

		caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
	}

	if (caps) {
		pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
		goto out;
	}

	clk = clk_register(NULL, &ingenic_clk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock '%s'\n", __func__,
		       clk_info->name);
		err = PTR_ERR(clk);
		goto out;
	}

	err = clk_register_clkdev(clk, clk_info->name, NULL);
	if (err)
		goto out;

	cgu->clocks.clks[idx] = clk;
out:
	if (err)
		kfree(ingenic_clk);
	return err;
}

struct ingenic_cgu *
ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
		unsigned num_clocks, struct device_node *np)
{
	struct ingenic_cgu *cgu;

	cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
	if (!cgu)
		goto err_out;

	cgu->base = of_iomap(np, 0);
	if (!cgu->base) {
		pr_err("%s: failed to map CGU registers\n", __func__);
		goto err_out_free;
	}

	cgu->np = np;
	cgu->clock_info = clock_info;
	cgu->clocks.clk_num = num_clocks;

	spin_lock_init(&cgu->lock);

	return cgu;

err_out_free:
	kfree(cgu);
err_out:
	return NULL;
}

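/*
 * A minimal sketch of how a SoC driver ties the two entry points above
 * together (function and table names are hypothetical):
 *
 *	static void __init example_cgu_init(struct device_node *np)
 *	{
 *		struct ingenic_cgu *cgu;
 *
 *		cgu = ingenic_cgu_new(example_cgu_clocks,
 *				      ARRAY_SIZE(example_cgu_clocks), np);
 *		if (!cgu) {
 *			pr_err("%s: failed to initialise CGU\n", __func__);
 *			return;
 *		}
 *
 *		if (ingenic_cgu_register_clocks(cgu))
 *			pr_err("%s: failed to register CGU clocks\n", __func__);
 *	}
 *	CLK_OF_DECLARE_DRIVER(example_cgu, "ingenic,example-cgu",
 *			      example_cgu_init);
 */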
int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
{
	unsigned i;
	int err;

	cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
				   GFP_KERNEL);
	if (!cgu->clocks.clks) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < cgu->clocks.clk_num; i++) {
		err = ingenic_register_clock(cgu, i);
		if (err)
			goto err_out_unregister;
	}

	err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
				  &cgu->clocks);
	if (err)
		goto err_out_unregister;

	return 0;

err_out_unregister:
	for (i = 0; i < cgu->clocks.clk_num; i++) {
		if (!cgu->clocks.clks[i])
			continue;
		if (cgu->clock_info[i].type & CGU_CLK_EXT)
			clk_put(cgu->clocks.clks[i]);
		else
			clk_unregister(cgu->clocks.clks[i]);
	}
	kfree(cgu->clocks.clks);
err_out:
	return err;
}