1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * clk-xgene.c - AppliedMicro X-Gene Clock Interface
4 *
5 * Copyright (c) 2013, Applied Micro Circuits Corporation
6 * Author: Loc Ho <lho@apm.com>
7 */
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/string_choices.h>
11 #include <linux/io.h>
12 #include <linux/of.h>
13 #include <linux/clkdev.h>
14 #include <linux/clk-provider.h>
15 #include <linux/of_address.h>
16
/* Register SCU_PCPPLL bit fields */
#define N_DIV_RD(src) ((src) & 0x000001ff) /* v1 PCP PLL feedback divider (used as N + 4) */
#define SC_N_DIV_RD(src) ((src) & 0x0000007f) /* v2 PLL feedback divider */
#define SC_OUTDIV2(src) (((src) & 0x00000100) >> 8) /* v2 output divider select: 1 => /2, 0 => /3 */

/* Register SCU_SOCPLL bit fields */
#define CLKR_RD(src) (((src) & 0x07000000)>>24) /* reference divider field (NREF - 1) */
#define CLKOD_RD(src) (((src) & 0x00300000)>>20) /* output divider field (NOUT - 1) */
#define REGSPEC_RESET_F1_MASK 0x00010000 /* PLL is held in reset (disabled) when set */
#define CLKF_RD(src) (((src) & 0x000001ff)) /* feedback divider field (NFB) */

#define XGENE_CLK_DRIVER_VER "0.1"

/* Serializes read-modify-write of all clock CSRs registered by this driver */
static DEFINE_SPINLOCK(clk_lock);
31
xgene_clk_read(void __iomem * csr)32 static inline u32 xgene_clk_read(void __iomem *csr)
33 {
34 return readl_relaxed(csr);
35 }
36
/* Write a clock CSR register (relaxed). */
static inline void xgene_clk_write(u32 data, void __iomem *csr)
{
	writel_relaxed(data, csr);
}
41
/* PLL Clock */
enum xgene_pll_type {
	PLL_TYPE_PCP = 0,	/* processor complex PLL */
	PLL_TYPE_SOC = 1,	/* SoC PLL */
};

/*
 * struct xgene_clk_pll - X-Gene PLL clock
 * @hw:		handle between common and hardware-specific interfaces
 * @reg:	base of the mapped PLL CSR region
 * @lock:	register lock (not referenced by the PLL ops below)
 * @pll_offset:	offset of the PLL control register within @reg
 * @type:	PCP or SoC PLL; selects the v1 rate formula
 * @version:	PLL IP version (1 or 2), see xgene_pllclk_version()
 */
struct xgene_clk_pll {
	struct clk_hw hw;
	void __iomem *reg;
	spinlock_t *lock;
	u32 pll_offset;
	enum xgene_pll_type type;
	int version;
};

#define to_xgene_clk_pll(_hw) container_of(_hw, struct xgene_clk_pll, hw)
58
xgene_clk_pll_is_enabled(struct clk_hw * hw)59 static int xgene_clk_pll_is_enabled(struct clk_hw *hw)
60 {
61 struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
62 u32 data;
63
64 data = xgene_clk_read(pllclk->reg + pllclk->pll_offset);
65 pr_debug("%s pll %s\n", clk_hw_get_name(hw),
66 data & REGSPEC_RESET_F1_MASK ? "disabled" : "enabled");
67
68 return data & REGSPEC_RESET_F1_MASK ? 0 : 1;
69 }
70
xgene_clk_pll_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)71 static unsigned long xgene_clk_pll_recalc_rate(struct clk_hw *hw,
72 unsigned long parent_rate)
73 {
74 struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
75 unsigned long fref;
76 unsigned long fvco;
77 u32 pll;
78 u32 nref;
79 u32 nout;
80 u32 nfb;
81
82 pll = xgene_clk_read(pllclk->reg + pllclk->pll_offset);
83
84 if (pllclk->version <= 1) {
85 if (pllclk->type == PLL_TYPE_PCP) {
86 /*
87 * PLL VCO = Reference clock * NF
88 * PCP PLL = PLL_VCO / 2
89 */
90 nout = 2;
91 fvco = parent_rate * (N_DIV_RD(pll) + 4);
92 } else {
93 /*
94 * Fref = Reference Clock / NREF;
95 * Fvco = Fref * NFB;
96 * Fout = Fvco / NOUT;
97 */
98 nref = CLKR_RD(pll) + 1;
99 nout = CLKOD_RD(pll) + 1;
100 nfb = CLKF_RD(pll);
101 fref = parent_rate / nref;
102 fvco = fref * nfb;
103 }
104 } else {
105 /*
106 * fvco = Reference clock * FBDIVC
107 * PLL freq = fvco / NOUT
108 */
109 nout = SC_OUTDIV2(pll) ? 2 : 3;
110 fvco = parent_rate * SC_N_DIV_RD(pll);
111 }
112 pr_debug("%s pll recalc rate %ld parent %ld version %d\n",
113 clk_hw_get_name(hw), fvco / nout, parent_rate,
114 pllclk->version);
115
116 return fvco / nout;
117 }
118
/* PLL ops are read-only: no enable/disable or set_rate callbacks */
static const struct clk_ops xgene_clk_pll_ops = {
	.is_enabled = xgene_clk_pll_is_enabled,
	.recalc_rate = xgene_clk_pll_recalc_rate,
};
123
/*
 * Allocate and register one X-Gene PLL clock.
 *
 * Returns the registered struct clk on success, or an ERR_PTR on
 * failure so that callers can use IS_ERR() (matches xgene_register_clk()).
 */
static struct clk *xgene_register_clk_pll(struct device *dev,
	const char *name, const char *parent_name,
	unsigned long flags, void __iomem *reg, u32 pll_offset,
	u32 type, spinlock_t *lock, int version)
{
	struct xgene_clk_pll *apmclk;
	struct clk *clk;
	struct clk_init_data init = {};	/* zero unused clk_init_data fields */

	/* allocate the APM clock structure */
	apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
	if (!apmclk)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &xgene_clk_pll_ops;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;

	apmclk->version = version;
	apmclk->reg = reg;
	apmclk->lock = lock;
	apmclk->pll_offset = pll_offset;
	apmclk->type = type;
	apmclk->hw.init = &init;

	/* Register the clock */
	clk = clk_register(dev, &apmclk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: could not register clk %s\n", __func__, name);
		kfree(apmclk);
		/*
		 * Propagate the ERR_PTR (not NULL) so callers checking
		 * IS_ERR() actually see the failure.
		 */
	}
	return clk;
}
160
xgene_pllclk_version(struct device_node * np)161 static int xgene_pllclk_version(struct device_node *np)
162 {
163 if (of_device_is_compatible(np, "apm,xgene-socpll-clock"))
164 return 1;
165 if (of_device_is_compatible(np, "apm,xgene-pcppll-clock"))
166 return 1;
167 return 2;
168 }
169
xgene_pllclk_init(struct device_node * np,enum xgene_pll_type pll_type)170 static void xgene_pllclk_init(struct device_node *np, enum xgene_pll_type pll_type)
171 {
172 const char *clk_name = np->full_name;
173 struct clk *clk;
174 void __iomem *reg;
175 int version = xgene_pllclk_version(np);
176
177 reg = of_iomap(np, 0);
178 if (!reg) {
179 pr_err("Unable to map CSR register for %pOF\n", np);
180 return;
181 }
182 of_property_read_string(np, "clock-output-names", &clk_name);
183 clk = xgene_register_clk_pll(NULL,
184 clk_name, of_clk_get_parent_name(np, 0),
185 0, reg, 0, pll_type, &clk_lock,
186 version);
187 if (!IS_ERR(clk)) {
188 of_clk_add_provider(np, of_clk_src_simple_get, clk);
189 clk_register_clkdev(clk, clk_name, NULL);
190 pr_debug("Add %s clock PLL\n", clk_name);
191 }
192 }
193
/* OF init hook for "apm,xgene-socpll[-v2]-clock" nodes */
static void xgene_socpllclk_init(struct device_node *np)
{
	xgene_pllclk_init(np, PLL_TYPE_SOC);
}
198
/* OF init hook for "apm,xgene-pcppll[-v2]-clock" nodes */
static void xgene_pcppllclk_init(struct device_node *np)
{
	xgene_pllclk_init(np, PLL_TYPE_PCP);
}
203
/**
 * struct xgene_clk_pmd - PMD clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @reg: register containing the fractional scale multiplier (scaler)
 * @shift: shift to the unit bit field
 * @mask: mask to the unit bit field
 * @denom: 1/denominator unit
 * @lock: register lock
 * @flags: XGENE_CLK_PMD_SCALE_INVERTED - By default the scaler is the value read
 * from the register plus one. For example,
 * 0 for (0 + 1) / denom,
 * 1 for (1 + 1) / denom and etc.
 * If this flag is set, it is
 * 0 for (denom - 0) / denom,
 * 1 for (denom - 1) / denom and etc.
 */
struct xgene_clk_pmd {
	struct clk_hw hw;
	void __iomem *reg;
	u8 shift;
	u32 mask;
	u64 denom;
	u32 flags;
	spinlock_t *lock;
};

#define to_xgene_clk_pmd(_hw) container_of(_hw, struct xgene_clk_pmd, hw)

#define XGENE_CLK_PMD_SCALE_INVERTED BIT(0)	/* scaler counts down from denom */
#define XGENE_CLK_PMD_SHIFT 8			/* bit position of the scaler field */
#define XGENE_CLK_PMD_WIDTH 3			/* width in bits of the scaler field */
236
/*
 * Compute the PMD output rate: parent_rate * scaler / denom, where the
 * scaler comes from the hardware register (optionally inverted).
 */
static unsigned long xgene_clk_pmd_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
	unsigned long flags = 0;
	u64 ret, scale;
	u32 val;

	/* Lock is optional; __acquire/__release keep sparse balanced */
	if (fd->lock)
		spin_lock_irqsave(fd->lock, flags);
	else
		__acquire(fd->lock);

	val = readl(fd->reg);

	if (fd->lock)
		spin_unlock_irqrestore(fd->lock, flags);
	else
		__release(fd->lock);

	ret = (u64)parent_rate;

	/* Extract the scaler field and apply the inversion convention */
	scale = (val & fd->mask) >> fd->shift;
	if (fd->flags & XGENE_CLK_PMD_SCALE_INVERTED)
		scale = fd->denom - scale;
	else
		scale++;

	/* freq = parent_rate * scaler / denom */
	/*
	 * NOTE(review): dividing before multiplying loses up to denom-1
	 * of precision; presumably acceptable at these rates — confirm.
	 */
	do_div(ret, fd->denom);
	ret *= scale;
	/* Never report a zero rate; fall back to the parent rate */
	if (ret == 0)
		ret = (u64)parent_rate;

	return ret;
}
273
xgene_clk_pmd_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)274 static int xgene_clk_pmd_determine_rate(struct clk_hw *hw,
275 struct clk_rate_request *req)
276 {
277 struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
278 u64 ret, scale;
279
280 if (!req->rate || req->rate >= req->best_parent_rate) {
281 req->rate = req->best_parent_rate;
282
283 return 0;
284 }
285
286 /* freq = parent_rate * scaler / denom */
287 ret = req->rate * fd->denom;
288 scale = DIV_ROUND_UP_ULL(ret, req->best_parent_rate);
289
290 ret = (u64)req->best_parent_rate * scale;
291 do_div(ret, fd->denom);
292
293 req->rate = ret;
294
295 return 0;
296 }
297
/*
 * Program the PMD scaler field so the output approximates @rate.
 * Returns 0 (the register write itself cannot fail).
 */
static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
	unsigned long flags = 0;
	u64 scale, ret;
	u32 val;

	/*
	 * Compute the scaler:
	 *
	 * freq = parent_rate * scaler / denom, or
	 * scaler = freq * denom / parent_rate
	 */
	ret = rate * fd->denom;
	scale = DIV_ROUND_UP_ULL(ret, (u64)parent_rate);

	/* Check if inverted */
	if (fd->flags & XGENE_CLK_PMD_SCALE_INVERTED)
		scale = fd->denom - scale;
	else
		scale--;

	/* Lock is optional; __acquire/__release keep sparse balanced */
	if (fd->lock)
		spin_lock_irqsave(fd->lock, flags);
	else
		__acquire(fd->lock);

	/* Read-modify-write only the scaler bit field */
	val = readl(fd->reg);
	val &= ~fd->mask;
	val |= (scale << fd->shift);
	writel(val, fd->reg);

	if (fd->lock)
		spin_unlock_irqrestore(fd->lock, flags);
	else
		__release(fd->lock);

	return 0;
}
338
/* PMD clock ops: fractional rate control via the hardware scaler field */
static const struct clk_ops xgene_clk_pmd_ops = {
	.recalc_rate = xgene_clk_pmd_recalc_rate,
	.determine_rate = xgene_clk_pmd_determine_rate,
	.set_rate = xgene_clk_pmd_set_rate,
};
344
345 static struct clk *
xgene_register_clk_pmd(struct device * dev,const char * name,const char * parent_name,unsigned long flags,void __iomem * reg,u8 shift,u8 width,u64 denom,u32 clk_flags,spinlock_t * lock)346 xgene_register_clk_pmd(struct device *dev,
347 const char *name, const char *parent_name,
348 unsigned long flags, void __iomem *reg, u8 shift,
349 u8 width, u64 denom, u32 clk_flags, spinlock_t *lock)
350 {
351 struct xgene_clk_pmd *fd;
352 struct clk_init_data init;
353 struct clk *clk;
354
355 fd = kzalloc(sizeof(*fd), GFP_KERNEL);
356 if (!fd)
357 return ERR_PTR(-ENOMEM);
358
359 init.name = name;
360 init.ops = &xgene_clk_pmd_ops;
361 init.flags = flags;
362 init.parent_names = parent_name ? &parent_name : NULL;
363 init.num_parents = parent_name ? 1 : 0;
364
365 fd->reg = reg;
366 fd->shift = shift;
367 fd->mask = (BIT(width) - 1) << shift;
368 fd->denom = denom;
369 fd->flags = clk_flags;
370 fd->lock = lock;
371 fd->hw.init = &init;
372
373 clk = clk_register(dev, &fd->hw);
374 if (IS_ERR(clk)) {
375 pr_err("%s: could not register clk %s\n", __func__, name);
376 kfree(fd);
377 return NULL;
378 }
379
380 return clk;
381 }
382
xgene_pmdclk_init(struct device_node * np)383 static void xgene_pmdclk_init(struct device_node *np)
384 {
385 const char *clk_name = np->full_name;
386 void __iomem *csr_reg;
387 struct resource res;
388 struct clk *clk;
389 u64 denom;
390 u32 flags = 0;
391 int rc;
392
393 /* Check if the entry is disabled */
394 if (!of_device_is_available(np))
395 return;
396
397 /* Parse the DTS register for resource */
398 rc = of_address_to_resource(np, 0, &res);
399 if (rc != 0) {
400 pr_err("no DTS register for %pOF\n", np);
401 return;
402 }
403 csr_reg = of_iomap(np, 0);
404 if (!csr_reg) {
405 pr_err("Unable to map resource for %pOF\n", np);
406 return;
407 }
408 of_property_read_string(np, "clock-output-names", &clk_name);
409
410 denom = BIT(XGENE_CLK_PMD_WIDTH);
411 flags |= XGENE_CLK_PMD_SCALE_INVERTED;
412
413 clk = xgene_register_clk_pmd(NULL, clk_name,
414 of_clk_get_parent_name(np, 0), 0,
415 csr_reg, XGENE_CLK_PMD_SHIFT,
416 XGENE_CLK_PMD_WIDTH, denom,
417 flags, &clk_lock);
418 if (!IS_ERR(clk)) {
419 of_clk_add_provider(np, of_clk_src_simple_get, clk);
420 clk_register_clkdev(clk, clk_name, NULL);
421 pr_debug("Add %s clock\n", clk_name);
422 } else {
423 if (csr_reg)
424 iounmap(csr_reg);
425 }
426 }
427
/* IP Clock */
struct xgene_dev_parameters {
	void __iomem *csr_reg;		/* CSR for IP clock */
	u32 reg_clk_offset;		/* Offset to clock enable CSR */
	u32 reg_clk_mask;		/* Mask bit for clock enable */
	u32 reg_csr_offset;		/* Offset to CSR reset */
	u32 reg_csr_mask;		/* Mask bit for disable CSR reset */
	void __iomem *divider_reg;	/* CSR for divider */
	u32 reg_divider_offset;		/* Offset to divider register */
	u32 reg_divider_shift;		/* Bit shift to divider field */
	u32 reg_divider_width;		/* Width of the bit to divider field */
};

/*
 * struct xgene_clk - gateable IP clock with an optional divider
 * @hw:    handle between common and hardware-specific interfaces
 * @lock:  protects register read-modify-write sequences (may be NULL)
 * @param: register locations and bit fields for this clock
 */
struct xgene_clk {
	struct clk_hw hw;
	spinlock_t *lock;
	struct xgene_dev_parameters param;
};

#define to_xgene_clk(_hw) container_of(_hw, struct xgene_clk, hw)
448
xgene_clk_enable(struct clk_hw * hw)449 static int xgene_clk_enable(struct clk_hw *hw)
450 {
451 struct xgene_clk *pclk = to_xgene_clk(hw);
452 unsigned long flags = 0;
453 u32 data;
454
455 if (pclk->lock)
456 spin_lock_irqsave(pclk->lock, flags);
457
458 if (pclk->param.csr_reg) {
459 pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
460 /* First enable the clock */
461 data = xgene_clk_read(pclk->param.csr_reg +
462 pclk->param.reg_clk_offset);
463 data |= pclk->param.reg_clk_mask;
464 xgene_clk_write(data, pclk->param.csr_reg +
465 pclk->param.reg_clk_offset);
466 pr_debug("%s clk offset 0x%08X mask 0x%08X value 0x%08X\n",
467 clk_hw_get_name(hw),
468 pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
469 data);
470
471 /* Second enable the CSR */
472 data = xgene_clk_read(pclk->param.csr_reg +
473 pclk->param.reg_csr_offset);
474 data &= ~pclk->param.reg_csr_mask;
475 xgene_clk_write(data, pclk->param.csr_reg +
476 pclk->param.reg_csr_offset);
477 pr_debug("%s csr offset 0x%08X mask 0x%08X value 0x%08X\n",
478 clk_hw_get_name(hw),
479 pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
480 data);
481 }
482
483 if (pclk->lock)
484 spin_unlock_irqrestore(pclk->lock, flags);
485
486 return 0;
487 }
488
xgene_clk_disable(struct clk_hw * hw)489 static void xgene_clk_disable(struct clk_hw *hw)
490 {
491 struct xgene_clk *pclk = to_xgene_clk(hw);
492 unsigned long flags = 0;
493 u32 data;
494
495 if (pclk->lock)
496 spin_lock_irqsave(pclk->lock, flags);
497
498 if (pclk->param.csr_reg) {
499 pr_debug("%s clock disabled\n", clk_hw_get_name(hw));
500 /* First put the CSR in reset */
501 data = xgene_clk_read(pclk->param.csr_reg +
502 pclk->param.reg_csr_offset);
503 data |= pclk->param.reg_csr_mask;
504 xgene_clk_write(data, pclk->param.csr_reg +
505 pclk->param.reg_csr_offset);
506
507 /* Second disable the clock */
508 data = xgene_clk_read(pclk->param.csr_reg +
509 pclk->param.reg_clk_offset);
510 data &= ~pclk->param.reg_clk_mask;
511 xgene_clk_write(data, pclk->param.csr_reg +
512 pclk->param.reg_clk_offset);
513 }
514
515 if (pclk->lock)
516 spin_unlock_irqrestore(pclk->lock, flags);
517 }
518
xgene_clk_is_enabled(struct clk_hw * hw)519 static int xgene_clk_is_enabled(struct clk_hw *hw)
520 {
521 struct xgene_clk *pclk = to_xgene_clk(hw);
522 u32 data = 0;
523
524 if (pclk->param.csr_reg) {
525 pr_debug("%s clock checking\n", clk_hw_get_name(hw));
526 data = xgene_clk_read(pclk->param.csr_reg +
527 pclk->param.reg_clk_offset);
528 pr_debug("%s clock is %s\n", clk_hw_get_name(hw),
529 str_enabled_disabled(data & pclk->param.reg_clk_mask));
530 } else {
531 return 1;
532 }
533
534 return data & pclk->param.reg_clk_mask ? 1 : 0;
535 }
536
xgene_clk_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)537 static unsigned long xgene_clk_recalc_rate(struct clk_hw *hw,
538 unsigned long parent_rate)
539 {
540 struct xgene_clk *pclk = to_xgene_clk(hw);
541 u32 data;
542
543 if (pclk->param.divider_reg) {
544 data = xgene_clk_read(pclk->param.divider_reg +
545 pclk->param.reg_divider_offset);
546 data >>= pclk->param.reg_divider_shift;
547 data &= (1 << pclk->param.reg_divider_width) - 1;
548
549 pr_debug("%s clock recalc rate %ld parent %ld\n",
550 clk_hw_get_name(hw),
551 parent_rate / data, parent_rate);
552
553 return parent_rate / data;
554 } else {
555 pr_debug("%s clock recalc rate %ld parent %ld\n",
556 clk_hw_get_name(hw), parent_rate, parent_rate);
557 return parent_rate;
558 }
559 }
560
xgene_clk_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)561 static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
562 unsigned long parent_rate)
563 {
564 struct xgene_clk *pclk = to_xgene_clk(hw);
565 unsigned long flags = 0;
566 u32 data;
567 u32 divider;
568 u32 divider_save;
569
570 if (pclk->lock)
571 spin_lock_irqsave(pclk->lock, flags);
572
573 if (pclk->param.divider_reg) {
574 /* Let's compute the divider */
575 if (rate > parent_rate)
576 rate = parent_rate;
577 divider_save = divider = parent_rate / rate; /* Rounded down */
578 divider &= (1 << pclk->param.reg_divider_width) - 1;
579 divider <<= pclk->param.reg_divider_shift;
580
581 /* Set new divider */
582 data = xgene_clk_read(pclk->param.divider_reg +
583 pclk->param.reg_divider_offset);
584 data &= ~(((1 << pclk->param.reg_divider_width) - 1)
585 << pclk->param.reg_divider_shift);
586 data |= divider;
587 xgene_clk_write(data, pclk->param.divider_reg +
588 pclk->param.reg_divider_offset);
589 pr_debug("%s clock set rate %ld\n", clk_hw_get_name(hw),
590 parent_rate / divider_save);
591 } else {
592 divider_save = 1;
593 }
594
595 if (pclk->lock)
596 spin_unlock_irqrestore(pclk->lock, flags);
597
598 return parent_rate / divider_save;
599 }
600
xgene_clk_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)601 static int xgene_clk_determine_rate(struct clk_hw *hw,
602 struct clk_rate_request *req)
603 {
604 struct xgene_clk *pclk = to_xgene_clk(hw);
605 unsigned long parent_rate = req->best_parent_rate;
606 u32 divider;
607
608 if (pclk->param.divider_reg) {
609 /* Let's compute the divider */
610 if (req->rate > parent_rate)
611 req->rate = parent_rate;
612 divider = parent_rate / req->rate; /* Rounded down */
613 } else {
614 divider = 1;
615 }
616
617 req->rate = parent_rate / divider;
618
619 return 0;
620 }
621
/* Device (IP) clock ops: gating, CSR reset, and an optional divider */
static const struct clk_ops xgene_clk_ops = {
	.enable = xgene_clk_enable,
	.disable = xgene_clk_disable,
	.is_enabled = xgene_clk_is_enabled,
	.recalc_rate = xgene_clk_recalc_rate,
	.set_rate = xgene_clk_set_rate,
	.determine_rate = xgene_clk_determine_rate,
};
630
/*
 * Allocate and register one IP clock, plus its clkdev lookup entry.
 * Returns the struct clk, or an ERR_PTR on registration failure.
 */
static struct clk *xgene_register_clk(struct device *dev,
	const char *name, const char *parent_name,
	struct xgene_dev_parameters *parameters, spinlock_t *lock)
{
	struct clk_init_data init;
	struct xgene_clk *apmclk;
	struct clk *clk;

	/* allocate the APM clock structure */
	apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
	if (!apmclk)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &xgene_clk_ops;
	init.flags = 0;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;

	apmclk->lock = lock;
	apmclk->hw.init = &init;
	apmclk->param = *parameters;

	/* Register the clock */
	clk = clk_register(dev, &apmclk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: could not register clk %s\n", __func__, name);
		kfree(apmclk);
		return clk;
	}

	/* Register the clock for lookup; failure here is non-fatal */
	if (clk_register_clkdev(clk, name, NULL) != 0)
		pr_err("%s: could not register lookup clk %s\n",
		       __func__, name);

	return clk;
}
671
/*
 * Parse an "apm,xgene-device-clock" node and register the IP clock it
 * describes.  Resource 0 ("csr-reg") is the enable/reset CSR; optional
 * resource 1 ("div-reg") is the divider CSR.  Offsets, masks, and the
 * divider geometry come from properties, with hardware defaults below.
 */
static void __init xgene_devclk_init(struct device_node *np)
{
	const char *clk_name = np->full_name;
	struct clk *clk;
	struct resource res;
	int rc;
	struct xgene_dev_parameters parameters;
	int i;

	/* Check if the entry is disabled */
	if (!of_device_is_available(np))
		return;

	/* Parse the DTS register for resource */
	parameters.csr_reg = NULL;
	parameters.divider_reg = NULL;
	for (i = 0; i < 2; i++) {
		void __iomem *map_res;
		rc = of_address_to_resource(np, i, &res);
		if (rc != 0) {
			if (i == 0) {
				pr_err("no DTS register for %pOF\n", np);
				return;
			}
			/* Second resource (divider) is optional */
			break;
		}
		map_res = of_iomap(np, i);
		if (!map_res) {
			pr_err("Unable to map resource %d for %pOF\n", i, np);
			goto err;
		}
		/* Resource name selects which CSR this mapping is */
		if (strcmp(res.name, "div-reg") == 0)
			parameters.divider_reg = map_res;
		else /* if (strcmp(res->name, "csr-reg") == 0) */
			parameters.csr_reg = map_res;
	}
	/* Fall back to hardware defaults for any missing property */
	if (of_property_read_u32(np, "csr-offset", &parameters.reg_csr_offset))
		parameters.reg_csr_offset = 0;
	if (of_property_read_u32(np, "csr-mask", &parameters.reg_csr_mask))
		parameters.reg_csr_mask = 0xF;
	if (of_property_read_u32(np, "enable-offset",
				&parameters.reg_clk_offset))
		parameters.reg_clk_offset = 0x8;
	if (of_property_read_u32(np, "enable-mask", &parameters.reg_clk_mask))
		parameters.reg_clk_mask = 0xF;
	if (of_property_read_u32(np, "divider-offset",
				&parameters.reg_divider_offset))
		parameters.reg_divider_offset = 0;
	if (of_property_read_u32(np, "divider-width",
				&parameters.reg_divider_width))
		parameters.reg_divider_width = 0;
	if (of_property_read_u32(np, "divider-shift",
				&parameters.reg_divider_shift))
		parameters.reg_divider_shift = 0;
	of_property_read_string(np, "clock-output-names", &clk_name);

	clk = xgene_register_clk(NULL, clk_name,
		of_clk_get_parent_name(np, 0), &parameters, &clk_lock);
	if (IS_ERR(clk))
		goto err;
	pr_debug("Add %s clock\n", clk_name);
	rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (rc != 0)
		pr_err("%s: could register provider clk %pOF\n", __func__, np);

	return;

err:
	/* Unwind whichever CSR mappings were established above */
	if (parameters.csr_reg)
		iounmap(parameters.csr_reg);
	if (parameters.divider_reg)
		iounmap(parameters.divider_reg);
}
745
746 CLK_OF_DECLARE(xgene_socpll_clock, "apm,xgene-socpll-clock", xgene_socpllclk_init);
747 CLK_OF_DECLARE(xgene_pcppll_clock, "apm,xgene-pcppll-clock", xgene_pcppllclk_init);
748 CLK_OF_DECLARE(xgene_pmd_clock, "apm,xgene-pmd-clock", xgene_pmdclk_init);
749 CLK_OF_DECLARE(xgene_socpll_v2_clock, "apm,xgene-socpll-v2-clock",
750 xgene_socpllclk_init);
751 CLK_OF_DECLARE(xgene_pcppll_v2_clock, "apm,xgene-pcppll-v2-clock",
752 xgene_pcppllclk_init);
753 CLK_OF_DECLARE(xgene_dev_clock, "apm,xgene-device-clock", xgene_devclk_init);
754