// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx 'Clocking Wizard' driver
 *
 * Copyright (C) 2013 - 2021 Xilinx
 *
 * Sören Brinkmann <soren.brinkmann@xilinx.com>
 *
 */

#include <linux/bitfield.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/err.h>
#include <linux/iopoll.h>

#define WZRD_NUM_OUTPUTS		7
#define WZRD_ACLK_MAX_FREQ		250000000UL

#define WZRD_CLK_CFG_REG(v, n)		(0x200 + 0x130 * (v) + 4 * (n))

#define WZRD_CLKOUT0_FRAC_EN		BIT(18)
#define WZRD_CLKFBOUT_1			0
#define WZRD_CLKFBOUT_2			1
#define WZRD_CLKOUT0_1			2
#define WZRD_CLKOUT0_2			3
#define WZRD_DESKEW_2			20
#define WZRD_DIVCLK			21
#define WZRD_CLKFBOUT_4			51
#define WZRD_CLKFBOUT_3			48
#define WZRD_DUTY_CYCLE			2
#define WZRD_O_DIV			4

#define WZRD_CLKFBOUT_FRAC_EN		BIT(1)
#define WZRD_CLKFBOUT_PREDIV2		(BIT(11) | BIT(12) | BIT(9))
#define WZRD_MULT_PREDIV2		(BIT(10) | BIT(9) | BIT(12))
#define WZRD_CLKFBOUT_EDGE		BIT(8)
#define WZRD_P5EN			BIT(13)
#define WZRD_P5EN_SHIFT			13
#define WZRD_P5FEDGE			BIT(15)
#define WZRD_DIVCLK_EDGE		BIT(10)
#define WZRD_P5FEDGE_SHIFT		15
#define WZRD_CLKOUT0_PREDIV2		BIT(11)
#define WZRD_EDGE_SHIFT			8

#define WZRD_CLKFBOUT_MULT_SHIFT	8
#define WZRD_CLKFBOUT_MULT_MASK		(0xff << WZRD_CLKFBOUT_MULT_SHIFT)
#define WZRD_CLKFBOUT_MULT_FRAC_MASK	GENMASK(25, 16)
#define WZRD_CLKFBOUT_O_MASK		GENMASK(7, 0)
#define WZRD_CLKFBOUT_L_SHIFT		0
#define WZRD_CLKFBOUT_H_SHIFT		8
#define WZRD_CLKFBOUT_L_MASK		GENMASK(7, 0)
#define WZRD_CLKFBOUT_H_MASK		GENMASK(15, 8)
#define WZRD_CLKFBOUT_FRAC_SHIFT	16
#define WZRD_CLKFBOUT_FRAC_MASK		(0x3ff << WZRD_CLKFBOUT_FRAC_SHIFT)
#define WZRD_VERSAL_FRAC_MASK		GENMASK(5, 0)
#define WZRD_DIVCLK_DIVIDE_SHIFT	0
#define WZRD_DIVCLK_DIVIDE_MASK		(0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
#define WZRD_CLKOUT_DIVIDE_SHIFT	0
#define WZRD_CLKOUT_DIVIDE_WIDTH	8
#define WZRD_CLKOUT_DIVIDE_MASK		(0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
#define WZRD_CLKOUT_FRAC_SHIFT		8
#define WZRD_CLKOUT_FRAC_MASK		0x3ff
#define WZRD_CLKOUT0_FRAC_MASK		GENMASK(17, 8)

#define WZRD_DR_MAX_INT_DIV_VALUE	255
#define WZRD_DR_STATUS_REG_OFFSET	0x04
#define WZRD_DR_LOCK_BIT_MASK		0x00000001
#define WZRD_DR_INIT_REG_OFFSET		0x25C
#define WZRD_DR_INIT_VERSAL_OFFSET	0x14
#define WZRD_DR_DIV_TO_PHASE_OFFSET	4
#define WZRD_DR_BEGIN_DYNA_RECONF	0x03
#define WZRD_DR_BEGIN_DYNA_RECONF_5_2	0x07
#define WZRD_DR_BEGIN_DYNA_RECONF1_5_2	0x02

#define WZRD_USEC_POLL			10
#define WZRD_TIMEOUT_POLL		1000
#define WZRD_FRAC_GRADIENT		64
#define PREDIV2_MULT			2

/* Divider limits, from UG572 Table 3-4 for Ultrascale+ */
#define DIV_O				0x01
#define DIV_ALL				0x03

#define WZRD_M_MIN			2ULL
#define WZRD_M_MAX			128ULL
#define WZRD_D_MIN			1ULL
#define WZRD_D_MAX			106ULL
#define WZRD_VCO_MIN			800000000ULL
#define WZRD_VCO_MAX			1600000000ULL
#define WZRD_O_MIN			2ULL
#define WZRD_O_MAX			128ULL
#define VER_WZRD_M_MIN			4
#define VER_WZRD_M_MAX			432
#define VER_WZRD_D_MIN			1
#define VER_WZRD_D_MAX			123
#define VER_WZRD_VCO_MIN		2160000000ULL
#define VER_WZRD_VCO_MAX		4320000000ULL
#define VER_WZRD_O_MIN			2
#define VER_WZRD_O_MAX			511
#define WZRD_MIN_ERR			20000
#define WZRD_FRAC_POINTS		1000

/* Get the mask from width */
#define div_mask(width)			((1 << (width)) - 1)

/* Extract divider instance from clock hardware instance */
#define to_clk_wzrd_divider(_hw) container_of(_hw, struct clk_wzrd_divider, hw)

enum clk_wzrd_int_clks {
	wzrd_clk_mul,
	wzrd_clk_mul_div,
	wzrd_clk_mul_frac,
	wzrd_clk_int_max
};

/**
 * struct clk_wzrd - Clock wizard private data structure
 *
 * @nb: Notifier block
 * @base: Memory base
 * @clk_in1: Handle to input clock 'clk_in1'
 * @axi_clk: Handle to input clock 's_axi_aclk'
 * @clks_internal: Internal clocks
 * @speed_grade: Speed grade of the device
 * @suspended: Flag indicating power state of the device
 * @clk_data: Output clock data
 */
struct clk_wzrd {
	struct notifier_block nb;
	void __iomem *base;
	struct clk *clk_in1;
	struct clk *axi_clk;
	struct clk_hw *clks_internal[wzrd_clk_int_max];
	unsigned int speed_grade;
	bool suspended;
	struct clk_hw_onecell_data clk_data;
};

/**
 * struct clk_wzrd_divider - clock divider specific to clk_wzrd
 *
 * @hw: handle between common and hardware-specific interfaces
 * @base: base address of register containing the divider
 * @offset: offset address of register containing the divider
 * @shift: shift to the divider bit field
 * @width: width of the divider bit field
 * @flags: clk_wzrd divider flags
 * @table: array of value/divider pairs, last entry should have div = 0
 * @m: value of the multiplier
 * @m_frac: fractional value of the multiplier
 * @d: value of the common divider
 * @o: value of the leaf divider
 * @o_frac: value of the fractional leaf divider
 * @lock: register lock
 */
struct clk_wzrd_divider {
	struct clk_hw hw;
	void __iomem *base;
	u16 offset;
	u8 shift;
	u8 width;
	u8 flags;
	const struct clk_div_table *table;
	u32 m;
	u32 m_frac;
	u32 d;
	u32 o;
	u32 o_frac;
	spinlock_t *lock;	/* divider lock */
};

struct versal_clk_data {
	bool is_versal;
};

#define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)

/* maximum frequencies for input/output clocks per speed grade */
static const unsigned long clk_wzrd_max_freq[] = {
	800000000UL,
	933000000UL,
	1066000000UL
};

/* spin lock variable for clk_wzrd */
static DEFINE_SPINLOCK(clkwzrd_lock);

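/*
 * On Versal the output divide value is not stored as a single divisor field:
 * the register pair holds separate low- and high-time counts (one byte each)
 * plus an EDGE bit that adds one to their sum, and an optional /2 prescaler
 * whose P5EN bit contributes one more count.  The recalc helper below
 * reassembles the effective divisor from those fields.
 */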
static unsigned long clk_wzrd_recalc_rate_ver(struct clk_hw *hw,
					      unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;
	u32 div, p5en, edge, prediv2, all;
	unsigned int vall, valh;

	edge = !!(readl(div_addr) & WZRD_CLKFBOUT_EDGE);
	p5en = !!(readl(div_addr) & WZRD_P5EN);
	prediv2 = !!(readl(div_addr) & WZRD_CLKOUT0_PREDIV2);
	vall = readl(div_addr + 4) & WZRD_CLKFBOUT_L_MASK;
	valh = readl(div_addr + 4) >> WZRD_CLKFBOUT_H_SHIFT;
	all = valh + vall + edge;
	if (!all)
		all = 1;

	if (prediv2)
		div = 2 * all + prediv2 * p5en;
	else
		div = all;

	return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}

static unsigned long clk_wzrd_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;
	unsigned int val;

	val = readl(div_addr) >> divider->shift;
	val &= div_mask(divider->width);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

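/*
 * Program a Versal output divider: the requested divide value is split into
 * equal high/low counts of value/4 (the remaining factor of two comes from
 * the prescaler), the EDGE bit covers a remainder of 2 or 3, and P5EN/P5FEDGE
 * handle odd values.  The new setting only takes effect once the dynamic
 * reconfiguration sequence completes and the status register reports lock.
 */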
static int clk_wzrd_ver_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
					 unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;
	u32 value, regh, edged, p5en, p5fedge, regval, regval1;
	unsigned long flags;
	int err;

	spin_lock_irqsave(divider->lock, flags);

	value = DIV_ROUND_CLOSEST(parent_rate, rate);

	regh = (value / 4);
	regval1 = readl(div_addr);
	regval1 |= WZRD_CLKFBOUT_PREDIV2;
	regval1 = regval1 & ~(WZRD_CLKFBOUT_EDGE | WZRD_P5EN | WZRD_P5FEDGE);
	if (value % 4 > 1) {
		edged = 1;
		regval1 |= (edged << WZRD_EDGE_SHIFT);
	}
	p5fedge = value % 2;
	p5en = value % 2;
	regval1 = regval1 | p5en << WZRD_P5EN_SHIFT | p5fedge << WZRD_P5FEDGE_SHIFT;
	writel(regval1, div_addr);

	regval = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
	writel(regval, div_addr + 4);
	/* Check status register */
	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
					value, value & WZRD_DR_LOCK_BIT_MASK,
					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
	if (err)
		goto err_reconfig;

	/* Initiate reconfiguration */
	writel(WZRD_DR_BEGIN_DYNA_RECONF,
	       divider->base + WZRD_DR_INIT_VERSAL_OFFSET);

	/* Check status register */
	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
					value, value & WZRD_DR_LOCK_BIT_MASK,
					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
err_reconfig:
	spin_unlock_irqrestore(divider->lock, flags);
	return err;
}

static int clk_wzrd_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;
	unsigned long flags;
	u32 value;
	int err;

	spin_lock_irqsave(divider->lock, flags);

	value = DIV_ROUND_CLOSEST(parent_rate, rate);

	/* Cap the value to max */
	value = min_t(u32, value, WZRD_DR_MAX_INT_DIV_VALUE);

	/* Set divisor and clear phase offset */
	writel(value, div_addr);
	writel(0x00, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);

	/* Check status register */
	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
					value, value & WZRD_DR_LOCK_BIT_MASK,
					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
	if (err)
		goto err_reconfig;

	/* Initiate reconfiguration */
	writel(WZRD_DR_BEGIN_DYNA_RECONF_5_2,
	       divider->base + WZRD_DR_INIT_REG_OFFSET);
	writel(WZRD_DR_BEGIN_DYNA_RECONF1_5_2,
	       divider->base + WZRD_DR_INIT_REG_OFFSET);

	/* Check status register */
	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
					value, value & WZRD_DR_LOCK_BIT_MASK,
					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
err_reconfig:
	spin_unlock_irqrestore(divider->lock, flags);
	return err;
}

static int clk_wzrd_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	u8 div;

	/*
	 * since we don't change parent rate we just round rate to closest
	 * achievable
	 */
	div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);

	req->rate = req->best_parent_rate / div;

	return 0;
}

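/*
 * Exhaustively search the (M, D, O) space within the Versal VCO and divider
 * limits for the combination whose output frequency is closest to the
 * requested rate.  The search stops early on an exact match; otherwise the
 * best candidate found so far is kept in the divider fields.
 */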
static int clk_wzrd_get_divisors_ver(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	u64 vco_freq, freq, diff, vcomin, vcomax, best_diff = -1ULL;
	u32 m, d, o;
	u32 mmin, mmax, dmin, dmax, omin, omax;

	mmin = VER_WZRD_M_MIN;
	mmax = VER_WZRD_M_MAX;
	dmin = VER_WZRD_D_MIN;
	dmax = VER_WZRD_D_MAX;
	omin = VER_WZRD_O_MIN;
	omax = VER_WZRD_O_MAX;
	vcomin = VER_WZRD_VCO_MIN;
	vcomax = VER_WZRD_VCO_MAX;

	for (m = mmin; m <= mmax; m++) {
		for (d = dmin; d <= dmax; d++) {
			vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
			if (vco_freq < vcomin || vco_freq > vcomax)
				continue;

			o = DIV_ROUND_CLOSEST_ULL(vco_freq, rate);
			if (o < omin || o > omax)
				continue;
			freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
			diff = abs(freq - rate);

			if (diff < best_diff) {
				best_diff = diff;
				divider->m = m;
				divider->d = d;
				divider->o = o;
				if (!diff)
					return 0;
			}
		}
	}
	return 0;
}

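/*
 * Non-Versal search: M, O and the VCO limits are scaled by 8 so the loop
 * steps through multiplier/divider values in increments of 0.125, the 1/8
 * fractional granularity the hardware provides.  The D range is clamped per
 * M so only combinations that keep the VCO in range are tried.  The integer
 * parts are stored in m/o and the leftover eighths are converted to
 * thousandths (x 125) for the fractional register fields; -EBUSY is returned
 * if no combination gets within WZRD_MIN_ERR of the requested rate.
 */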
static int clk_wzrd_get_divisors(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	u64 vco_freq, freq, diff, vcomin, vcomax, best_diff = -1ULL;
	u64 m, d, o;
	u64 mmin, mmax, dmin, dmax, omin, omax, mdmin, mdmax;

	mmin = WZRD_M_MIN << 3;
	mmax = WZRD_M_MAX << 3;
	dmin = WZRD_D_MIN;
	dmax = WZRD_D_MAX;
	omin = WZRD_O_MIN << 3;
	omax = WZRD_O_MAX << 3;
	vcomin = WZRD_VCO_MIN << 3;
	vcomax = WZRD_VCO_MAX << 3;

	for (m = mmin; m <= mmax; m++) {
		mdmin = max(dmin, div64_u64(parent_rate * m + vcomax / 2, vcomax));
		mdmax = min(dmax, div64_u64(parent_rate * m + vcomin / 2, vcomin));
		for (d = mdmin; d <= mdmax; d++) {
			vco_freq = DIV_ROUND_CLOSEST_ULL((parent_rate * m), d);
			o = DIV_ROUND_CLOSEST_ULL(vco_freq, rate);
			if (o < omin || o > omax)
				continue;
			freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
			diff = freq - rate;
			if (diff < best_diff) {
				best_diff = diff;
				divider->m = m >> 3;
				divider->m_frac = (m - (divider->m << 3)) * 125;
				divider->d = d;
				divider->o = o >> 3;
				divider->o_frac = (o - (divider->o << 3)) * 125;
			}
		}
	}
	return best_diff < WZRD_MIN_ERR ? 0 : -EBUSY;
}

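/*
 * Common reconfiguration handshake: wait for the clocking wizard to report
 * lock, write the "begin dynamic reconfiguration" command to the init
 * register, then wait for lock again so the caller knows the new
 * configuration has been applied.
 */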
static int clk_wzrd_reconfig(struct clk_wzrd_divider *divider, void __iomem *div_addr)
{
	u32 value;
	int err;

	/* Check status register */
	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
					value & WZRD_DR_LOCK_BIT_MASK,
					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
	if (err)
		return -ETIMEDOUT;

	/* Initiate reconfiguration */
	writel(WZRD_DR_BEGIN_DYNA_RECONF, div_addr);
	/* Check status register */
	return readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
					 value & WZRD_DR_LOCK_BIT_MASK,
					 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
}

static int clk_wzrd_dynamic_ver_all_nolock(struct clk_hw *hw, unsigned long rate,
					   unsigned long parent_rate)
{
	u32 regh, edged, p5en, p5fedge, value2, m, regval, regval1, value;
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr;
	int err;

	err = clk_wzrd_get_divisors_ver(hw, rate, parent_rate);
	if (err)
		return err;

	writel(0, divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_4));

	m = divider->m;
	edged = m % WZRD_DUTY_CYCLE;
	regh = m / WZRD_DUTY_CYCLE;
	regval1 = readl(divider->base + WZRD_CLK_CFG_REG(1,
							 WZRD_CLKFBOUT_1));
	regval1 |= WZRD_MULT_PREDIV2;
	if (edged)
		regval1 = regval1 | WZRD_CLKFBOUT_EDGE;
	else
		regval1 = regval1 & ~WZRD_CLKFBOUT_EDGE;

	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
							 WZRD_CLKFBOUT_1));
	regval1 = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
							 WZRD_CLKFBOUT_2));

	value2 = divider->d;
	edged = value2 % WZRD_DUTY_CYCLE;
	regh = (value2 / WZRD_DUTY_CYCLE);
	regval1 = FIELD_PREP(WZRD_DIVCLK_EDGE, edged);
	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
							 WZRD_DESKEW_2));
	regval1 = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1, WZRD_DIVCLK));

	value = divider->o;
	regh = value / WZRD_O_DIV;
	regval1 = readl(divider->base + WZRD_CLK_CFG_REG(1,
							 WZRD_CLKOUT0_1));
	regval1 |= WZRD_CLKFBOUT_PREDIV2;
	regval1 = regval1 & ~(WZRD_CLKFBOUT_EDGE | WZRD_P5EN | WZRD_P5FEDGE);

	if (value % WZRD_O_DIV > 1) {
		edged = 1;
		regval1 |= edged << WZRD_CLKFBOUT_H_SHIFT;
	}

	p5fedge = value % WZRD_DUTY_CYCLE;
	p5en = value % WZRD_DUTY_CYCLE;

	regval1 = regval1 | FIELD_PREP(WZRD_P5EN, p5en) | FIELD_PREP(WZRD_P5FEDGE, p5fedge);
	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
							 WZRD_CLKOUT0_1));
	regval = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
	writel(regval, divider->base + WZRD_CLK_CFG_REG(1,
							WZRD_CLKOUT0_2));
	div_addr = divider->base + WZRD_DR_INIT_VERSAL_OFFSET;

	return clk_wzrd_reconfig(divider, div_addr);
}

static int clk_wzrd_dynamic_all_nolock(struct clk_hw *hw, unsigned long rate,
				       unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr;
	u32 reg;
	int err;

	err = clk_wzrd_get_divisors(hw, rate, parent_rate);
	if (err)
		return err;

	reg = FIELD_PREP(WZRD_CLKOUT_DIVIDE_MASK, divider->o) |
	      FIELD_PREP(WZRD_CLKOUT0_FRAC_MASK, divider->o_frac);

	writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 2));
	reg = FIELD_PREP(WZRD_CLKFBOUT_MULT_MASK, divider->m) |
	      FIELD_PREP(WZRD_CLKFBOUT_MULT_FRAC_MASK, divider->m_frac) |
	      FIELD_PREP(WZRD_DIVCLK_DIVIDE_MASK, divider->d);
	writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 0));
	writel(0, divider->base + WZRD_CLK_CFG_REG(0, 3));
	div_addr = divider->base + WZRD_DR_INIT_REG_OFFSET;
	return clk_wzrd_reconfig(divider, div_addr);
}

static int clk_wzrd_dynamic_all(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(divider->lock, flags);

	ret = clk_wzrd_dynamic_all_nolock(hw, rate, parent_rate);

	spin_unlock_irqrestore(divider->lock, flags);

	return ret;
}

static int clk_wzrd_dynamic_all_ver(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(divider->lock, flags);

	ret = clk_wzrd_dynamic_ver_all_nolock(hw, rate, parent_rate);

	spin_unlock_irqrestore(divider->lock, flags);

	return ret;
}

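/*
 * rate = parent * (M + m_frac/1000) / (D * (O + o_frac/1000)), computed in
 * units of 1/1000 to avoid floating point: both the multiplier and the
 * output divider carry a 10-bit fractional part expressed in thousandths.
 */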
static unsigned long clk_wzrd_recalc_rate_all(struct clk_hw *hw,
					      unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	u32 m, d, o, reg, f, mf;
	u64 mul;

	reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 0));
	d = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
	m = FIELD_GET(WZRD_CLKFBOUT_MULT_MASK, reg);
	mf = FIELD_GET(WZRD_CLKFBOUT_MULT_FRAC_MASK, reg);
	reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 2));
	o = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
	f = FIELD_GET(WZRD_CLKOUT0_FRAC_MASK, reg);

	mul = m * 1000 + mf;
	return DIV_ROUND_CLOSEST_ULL(parent_rate * mul, d * (o * 1000 + f));
}

static unsigned long clk_wzrd_recalc_rate_all_ver(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	u32 edged, div2, p5en, edge, prediv2, all, regl, regh, mult;
	u32 div, reg;

	edge = !!(readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_1)) &
		  WZRD_CLKFBOUT_EDGE);

	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_2));
	regl = FIELD_GET(WZRD_CLKFBOUT_L_MASK, reg);
	regh = FIELD_GET(WZRD_CLKFBOUT_H_MASK, reg);

	mult = regl + regh + edge;
	if (!mult)
		mult = 1;

	regl = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_4)) &
	       WZRD_CLKFBOUT_FRAC_EN;
	if (regl) {
		regl = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_3))
		       & WZRD_VERSAL_FRAC_MASK;
		mult = mult * WZRD_FRAC_GRADIENT + regl;
		parent_rate = DIV_ROUND_CLOSEST((parent_rate * mult), WZRD_FRAC_GRADIENT);
	} else {
		parent_rate = parent_rate * mult;
	}

	/* O Calculation */
	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKOUT0_1));
	edged = FIELD_GET(WZRD_CLKFBOUT_EDGE, reg);
	p5en = FIELD_GET(WZRD_P5EN, reg);
	prediv2 = FIELD_GET(WZRD_CLKOUT0_PREDIV2, reg);

	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKOUT0_2));
	/* Low time */
	regl = FIELD_GET(WZRD_CLKFBOUT_L_MASK, reg);
	/* High time */
	regh = FIELD_GET(WZRD_CLKFBOUT_H_MASK, reg);
	all = regh + regl + edged;
	if (!all)
		all = 1;

	if (prediv2)
		div2 = PREDIV2_MULT * all + p5en;
	else
		div2 = all;

	/* D calculation */
	edged = !!(readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_DESKEW_2)) &
		   WZRD_DIVCLK_EDGE);
	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_DIVCLK));
	/* Low time */
	regl = FIELD_GET(WZRD_CLKFBOUT_L_MASK, reg);
	/* High time */
	regh = FIELD_GET(WZRD_CLKFBOUT_H_MASK, reg);
	div = regl + regh + edged;
	if (!div)
		div = 1;

	div = div * div2;
	return divider_recalc_rate(hw, parent_rate, div, divider->table,
				   divider->flags, divider->width);
}

static int clk_wzrd_determine_rate_all(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	u32 m, d, o;
	int err;

	err = clk_wzrd_get_divisors(hw, req->rate, req->best_parent_rate);
	if (err)
		return err;

	m = divider->m;
	d = divider->d;
	o = divider->o;

	req->rate = div_u64(req->best_parent_rate * (m * 1000 + divider->m_frac),
			    d * (o * 1000 + divider->o_frac));
	return 0;
}

static int clk_wzrd_ver_determine_rate_all(struct clk_hw *hw,
					   struct clk_rate_request *req)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	unsigned long int_freq;
	u32 m, d, o, div, f;
	int err;

	err = clk_wzrd_get_divisors_ver(hw, req->rate, req->best_parent_rate);
	if (err)
		return err;

	m = divider->m;
	d = divider->d;
	o = divider->o;

	div = d * o;
	int_freq = divider_recalc_rate(hw, req->best_parent_rate * m, div,
				       divider->table,
				       divider->flags, divider->width);

	if (req->rate > int_freq) {
		f = DIV_ROUND_CLOSEST_ULL(req->rate * WZRD_FRAC_POINTS,
					  int_freq);
		req->rate = DIV_ROUND_CLOSEST(int_freq * f, WZRD_FRAC_POINTS);
	}
	return 0;
}

static const struct clk_ops clk_wzrd_ver_divider_ops = {
	.determine_rate = clk_wzrd_determine_rate,
	.set_rate = clk_wzrd_ver_dynamic_reconfig,
	.recalc_rate = clk_wzrd_recalc_rate_ver,
};

static const struct clk_ops clk_wzrd_ver_div_all_ops = {
	.determine_rate = clk_wzrd_ver_determine_rate_all,
	.set_rate = clk_wzrd_dynamic_all_ver,
	.recalc_rate = clk_wzrd_recalc_rate_all_ver,
};

static const struct clk_ops clk_wzrd_clk_divider_ops = {
	.determine_rate = clk_wzrd_determine_rate,
	.set_rate = clk_wzrd_dynamic_reconfig,
	.recalc_rate = clk_wzrd_recalc_rate,
};

static const struct clk_ops clk_wzrd_clk_div_all_ops = {
	.determine_rate = clk_wzrd_determine_rate_all,
	.set_rate = clk_wzrd_dynamic_all,
	.recalc_rate = clk_wzrd_recalc_rate_all,
};

static unsigned long clk_wzrd_recalc_ratef(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	unsigned int val;
	u32 div, frac;
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;

	val = readl(div_addr);
	div = val & div_mask(divider->width);
	frac = (val >> WZRD_CLKOUT_FRAC_SHIFT) & WZRD_CLKOUT_FRAC_MASK;

	return mult_frac(parent_rate, 1000, (div * 1000) + frac);
}

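/*
 * Fractional set_rate for clkout0: the divide value is computed in
 * thousandths, the integer part goes into the 8-bit divide field and the
 * remainder into the fractional field above it, then the standard dynamic
 * reconfiguration sequence is triggered.
 */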
static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
				       unsigned long parent_rate)
{
	int err;
	u32 value, pre;
	unsigned long rate_div, f, clockout0_div;
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;

	rate_div = DIV_ROUND_DOWN_ULL(parent_rate * 1000, rate);
	clockout0_div = rate_div / 1000;

	pre = DIV_ROUND_CLOSEST((parent_rate * 1000), rate);
	f = (u32)(pre - (clockout0_div * 1000));
	f = f & WZRD_CLKOUT_FRAC_MASK;
	f = f << WZRD_CLKOUT_DIVIDE_WIDTH;

	value = (f | (clockout0_div & WZRD_CLKOUT_DIVIDE_MASK));

	/* Set divisor and clear phase offset */
	writel(value, div_addr);
	writel(0x0, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);

	/* Check status register */
	err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
				 value & WZRD_DR_LOCK_BIT_MASK,
				 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
	if (err)
		return err;

	/* Initiate reconfiguration */
	writel(WZRD_DR_BEGIN_DYNA_RECONF_5_2,
	       divider->base + WZRD_DR_INIT_REG_OFFSET);
	writel(WZRD_DR_BEGIN_DYNA_RECONF1_5_2,
	       divider->base + WZRD_DR_INIT_REG_OFFSET);

	/* Check status register */
	return readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
				  value & WZRD_DR_LOCK_BIT_MASK,
				  WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
}

static int clk_wzrd_determine_rate_f(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return 0;
}

static const struct clk_ops clk_wzrd_clk_divider_ops_f = {
	.determine_rate = clk_wzrd_determine_rate_f,
	.set_rate = clk_wzrd_dynamic_reconfig_f,
	.recalc_rate = clk_wzrd_recalc_ratef,
};

static struct clk_hw *clk_wzrd_register_divf(struct device *dev,
					     const char *name,
					     const char *parent_name,
					     unsigned long flags,
					     void __iomem *base, u16 offset,
					     u8 shift, u8 width,
					     u8 clk_divider_flags,
					     u32 div_type,
					     spinlock_t *lock)
{
	struct clk_wzrd_divider *div;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = name;

	init.ops = &clk_wzrd_clk_divider_ops_f;

	init.flags = flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	div->base = base;
	div->offset = offset;
	div->shift = shift;
	div->width = width;
	div->flags = clk_divider_flags;
	div->lock = lock;
	div->hw.init = &init;

	hw = &div->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	return hw;
}

static struct clk_hw *clk_wzrd_ver_register_divider(struct device *dev,
						    const char *name,
						    const char *parent_name,
						    unsigned long flags,
						    void __iomem *base,
						    u16 offset,
						    u8 shift, u8 width,
						    u8 clk_divider_flags,
						    u32 div_type,
						    spinlock_t *lock)
{
	struct clk_wzrd_divider *div;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
		init.ops = &clk_divider_ro_ops;
	else if (div_type == DIV_O)
		init.ops = &clk_wzrd_ver_divider_ops;
	else
		init.ops = &clk_wzrd_ver_div_all_ops;
	init.flags = flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	div->base = base;
	div->offset = offset;
	div->shift = shift;
	div->width = width;
	div->flags = clk_divider_flags;
	div->lock = lock;
	div->hw.init = &init;

	hw = &div->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	return hw;
}

static struct clk_hw *clk_wzrd_register_divider(struct device *dev,
						const char *name,
						const char *parent_name,
						unsigned long flags,
						void __iomem *base, u16 offset,
						u8 shift, u8 width,
						u8 clk_divider_flags,
						u32 div_type,
						spinlock_t *lock)
{
	struct clk_wzrd_divider *div;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
		init.ops = &clk_divider_ro_ops;
	else if (div_type == DIV_O)
		init.ops = &clk_wzrd_clk_divider_ops;
	else
		init.ops = &clk_wzrd_clk_div_all_ops;
	init.flags = flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	div->base = base;
	div->offset = offset;
	div->shift = shift;
	div->width = width;
	div->flags = clk_divider_flags;
	div->lock = lock;
	div->hw.init = &init;

	hw = &div->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	return hw;
}

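/*
 * Rate-change notifier for the input clocks: reject any new rate that exceeds
 * the speed-grade limit on clk_in1 or the AXI interface maximum on
 * s_axi_aclk.  While the device is suspended the check is skipped.
 */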
static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
				 void *data)
{
	unsigned long max;
	struct clk_notifier_data *ndata = data;
	struct clk_wzrd *clk_wzrd = to_clk_wzrd(nb);

	if (clk_wzrd->suspended)
		return NOTIFY_OK;

	if (ndata->clk == clk_wzrd->clk_in1)
		max = clk_wzrd_max_freq[clk_wzrd->speed_grade - 1];
	else if (ndata->clk == clk_wzrd->axi_clk)
		max = WZRD_ACLK_MAX_FREQ;
	else
		return NOTIFY_DONE;	/* should never happen */

	switch (event) {
	case PRE_RATE_CHANGE:
		if (ndata->new_rate > max)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
	case ABORT_RATE_CHANGE:
	default:
		return NOTIFY_DONE;
	}
}

static int __maybe_unused clk_wzrd_suspend(struct device *dev)
{
	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);

	clk_disable_unprepare(clk_wzrd->axi_clk);
	clk_wzrd->suspended = true;

	return 0;
}

static int __maybe_unused clk_wzrd_resume(struct device *dev)
{
	int ret;
	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);

	ret = clk_prepare_enable(clk_wzrd->axi_clk);
	if (ret) {
		dev_err(dev, "unable to enable s_axi_aclk\n");
		return ret;
	}

	clk_wzrd->suspended = false;

	return 0;
}

static SIMPLE_DEV_PM_OPS(clk_wzrd_dev_pm_ops, clk_wzrd_suspend,
			 clk_wzrd_resume);

static const struct versal_clk_data versal_data = {
	.is_versal	= true,
};

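/*
 * Build the internal clock tree: with more than one output, the VCO
 * multiplier read back from the configuration registers is modelled as a
 * fixed-factor clock, followed by the common divider D, and finally one
 * divider per output.  On non-Versal parts output 0 gets the fractional
 * divider implementation and the remaining outputs the plain one.  With a
 * single output, only one divider covering the whole M/D/O path is
 * registered.
 */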
static int clk_wzrd_register_output_clocks(struct device *dev, int nr_outputs)
{
	const char *clkout_name, *clk_name, *clk_mul_name;
	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
	u32 regl, regh, edge, regld, reghd, edged, div;
	const struct versal_clk_data *data;
	unsigned long flags = 0;
	bool is_versal = false;
	void __iomem *ctrl_reg;
	u32 reg, reg_f, mult;
	int i;

	data = device_get_match_data(dev);
	if (data)
		is_versal = data->is_versal;

	clkout_name = devm_kasprintf(dev, GFP_KERNEL, "%s_out0", dev_name(dev));
	if (!clkout_name)
		return -ENOMEM;

	if (is_versal) {
		if (nr_outputs == 1) {
			clk_wzrd->clk_data.hws[0] = clk_wzrd_ver_register_divider
				(dev, clkout_name,
				 __clk_get_name(clk_wzrd->clk_in1), 0,
				 clk_wzrd->base, WZRD_CLK_CFG_REG(is_versal, 3),
				 WZRD_CLKOUT_DIVIDE_SHIFT,
				 WZRD_CLKOUT_DIVIDE_WIDTH,
				 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
				 DIV_ALL, &clkwzrd_lock);

			return 0;
		}
		/* register multiplier */
		edge = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0)) &
			  BIT(8));
		regl = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 1)) &
			WZRD_CLKFBOUT_L_MASK) >> WZRD_CLKFBOUT_L_SHIFT;
		regh = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 1)) &
			WZRD_CLKFBOUT_H_MASK) >> WZRD_CLKFBOUT_H_SHIFT;
		mult = regl + regh + edge;
		if (!mult)
			mult = 1;
		mult = mult * WZRD_FRAC_GRADIENT;

		regl = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 51)) &
		       WZRD_CLKFBOUT_FRAC_EN;
		if (regl) {
			regl = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 48)) &
			       WZRD_VERSAL_FRAC_MASK;
			mult = mult + regl;
		}
		div = 64;
	} else {
		if (nr_outputs == 1) {
			clk_wzrd->clk_data.hws[0] = clk_wzrd_register_divider
				(dev, clkout_name,
				 __clk_get_name(clk_wzrd->clk_in1), 0,
				 clk_wzrd->base, WZRD_CLK_CFG_REG(is_versal, 3),
				 WZRD_CLKOUT_DIVIDE_SHIFT,
				 WZRD_CLKOUT_DIVIDE_WIDTH,
				 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
				 DIV_ALL, &clkwzrd_lock);

			return 0;
		}
		reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0));
		reg_f = reg & WZRD_CLKFBOUT_FRAC_MASK;
		reg_f = reg_f >> WZRD_CLKFBOUT_FRAC_SHIFT;

		reg = reg & WZRD_CLKFBOUT_MULT_MASK;
		reg = reg >> WZRD_CLKFBOUT_MULT_SHIFT;
		mult = (reg * 1000) + reg_f;
		div = 1000;
	}
	clk_name = devm_kasprintf(dev, GFP_KERNEL, "%s_mul", dev_name(dev));
	if (!clk_name)
		return -ENOMEM;
	clk_wzrd->clks_internal[wzrd_clk_mul] = devm_clk_hw_register_fixed_factor
			(dev, clk_name,
			 __clk_get_name(clk_wzrd->clk_in1),
			 0, mult, div);
	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
		dev_err(dev, "unable to register fixed-factor clock\n");
		return PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
	}

	clk_name = devm_kasprintf(dev, GFP_KERNEL, "%s_mul_div", dev_name(dev));
	if (!clk_name)
		return -ENOMEM;

	if (is_versal) {
		edged = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 20)) &
			   BIT(10));
		regld = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 21)) &
			 WZRD_CLKFBOUT_L_MASK) >> WZRD_CLKFBOUT_L_SHIFT;
		reghd = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 21)) &
			 WZRD_CLKFBOUT_H_MASK) >> WZRD_CLKFBOUT_H_SHIFT;
		div = (regld + reghd + edged);
		if (!div)
			div = 1;

		clk_mul_name = clk_hw_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]);
		clk_wzrd->clks_internal[wzrd_clk_mul_div] =
			devm_clk_hw_register_fixed_factor(dev, clk_name, clk_mul_name, 0, 1, div);
	} else {
		ctrl_reg = clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0);
		clk_wzrd->clks_internal[wzrd_clk_mul_div] = devm_clk_hw_register_divider
			(dev, clk_name,
			 clk_hw_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
			 flags, ctrl_reg, 0, 8, CLK_DIVIDER_ONE_BASED |
			 CLK_DIVIDER_ALLOW_ZERO, &clkwzrd_lock);
	}
	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) {
		dev_err(dev, "unable to register divider clock\n");
		return PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
	}

	/* register div per output */
	for (i = nr_outputs - 1; i >= 0; i--) {
		clkout_name = devm_kasprintf(dev, GFP_KERNEL, "%s_out%d", dev_name(dev), i);
		if (!clkout_name)
			return -ENOMEM;

		if (is_versal) {
			clk_wzrd->clk_data.hws[i] = clk_wzrd_ver_register_divider
				(dev,
				 clkout_name, clk_name, 0,
				 clk_wzrd->base,
				 (WZRD_CLK_CFG_REG(is_versal, 2) + i * 8),
				 WZRD_CLKOUT_DIVIDE_SHIFT,
				 WZRD_CLKOUT_DIVIDE_WIDTH,
				 CLK_DIVIDER_ONE_BASED |
				 CLK_DIVIDER_ALLOW_ZERO,
				 DIV_O, &clkwzrd_lock);
		} else {
			if (!i)
				clk_wzrd->clk_data.hws[i] = clk_wzrd_register_divf
					(dev, clkout_name, clk_name, flags, clk_wzrd->base,
					 (WZRD_CLK_CFG_REG(is_versal, 2) + i * 12),
					 WZRD_CLKOUT_DIVIDE_SHIFT,
					 WZRD_CLKOUT_DIVIDE_WIDTH,
					 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
					 DIV_O, &clkwzrd_lock);
			else
				clk_wzrd->clk_data.hws[i] = clk_wzrd_register_divider
					(dev, clkout_name, clk_name, 0, clk_wzrd->base,
					 (WZRD_CLK_CFG_REG(is_versal, 2) + i * 12),
					 WZRD_CLKOUT_DIVIDE_SHIFT,
					 WZRD_CLKOUT_DIVIDE_WIDTH,
					 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
					 DIV_O, &clkwzrd_lock);
		}
		if (IS_ERR(clk_wzrd->clk_data.hws[i])) {
			dev_err(dev, "unable to register divider clock\n");
			return PTR_ERR(clk_wzrd->clk_data.hws[i]);
		}
	}

	return 0;
}

static int clk_wzrd_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct clk_wzrd *clk_wzrd;
	unsigned long rate;
	int nr_outputs;
	int ret;

	ret = of_property_read_u32(np, "xlnx,nr-outputs", &nr_outputs);
	if (ret || nr_outputs > WZRD_NUM_OUTPUTS)
		return -EINVAL;

	clk_wzrd = devm_kzalloc(&pdev->dev, struct_size(clk_wzrd, clk_data.hws, nr_outputs),
				GFP_KERNEL);
	if (!clk_wzrd)
		return -ENOMEM;
	platform_set_drvdata(pdev, clk_wzrd);

	clk_wzrd->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(clk_wzrd->base))
		return PTR_ERR(clk_wzrd->base);

	clk_wzrd->axi_clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(clk_wzrd->axi_clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->axi_clk),
				     "s_axi_aclk not found\n");
	rate = clk_get_rate(clk_wzrd->axi_clk);
	if (rate > WZRD_ACLK_MAX_FREQ) {
		dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n", rate);
		return -EINVAL;
	}

	if (!of_property_present(np, "xlnx,static-config")) {
		ret = of_property_read_u32(np, "xlnx,speed-grade", &clk_wzrd->speed_grade);
		if (!ret) {
			if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) {
				dev_warn(&pdev->dev, "invalid speed grade '%d'\n",
					 clk_wzrd->speed_grade);
				clk_wzrd->speed_grade = 0;
			}
		}

		clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
		if (IS_ERR(clk_wzrd->clk_in1))
			return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->clk_in1),
					     "clk_in1 not found\n");

		ret = clk_wzrd_register_output_clocks(&pdev->dev, nr_outputs);
		if (ret)
			return ret;

		clk_wzrd->clk_data.num = nr_outputs;
		ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
						  &clk_wzrd->clk_data);
		if (ret) {
			dev_err(&pdev->dev, "unable to register clock provider\n");
			return ret;
		}

		if (clk_wzrd->speed_grade) {
			clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier;

			ret = devm_clk_notifier_register(&pdev->dev, clk_wzrd->clk_in1,
							 &clk_wzrd->nb);
			if (ret)
				dev_warn(&pdev->dev,
					 "unable to register clock notifier\n");

			ret = devm_clk_notifier_register(&pdev->dev, clk_wzrd->axi_clk,
							 &clk_wzrd->nb);
			if (ret)
				dev_warn(&pdev->dev,
					 "unable to register clock notifier\n");
		}
	}

	return 0;
}

static const struct of_device_id clk_wzrd_ids[] = {
	{ .compatible = "xlnx,versal-clk-wizard", .data = &versal_data },
	{ .compatible = "xlnx,clocking-wizard" },
	{ .compatible = "xlnx,clocking-wizard-v5.2" },
	{ .compatible = "xlnx,clocking-wizard-v6.0" },
	{ },
};
MODULE_DEVICE_TABLE(of, clk_wzrd_ids);

static struct platform_driver clk_wzrd_driver = {
	.driver = {
		.name = "clk-wizard",
		.of_match_table = clk_wzrd_ids,
		.pm = &clk_wzrd_dev_pm_ops,
	},
	.probe = clk_wzrd_probe,
};
module_platform_driver(clk_wzrd_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com>");
MODULE_DESCRIPTION("Driver for the Xilinx Clocking Wizard IP core");