xref: /linux/drivers/clk/xilinx/clk-xlnx-clock-wizard.c (revision e814f3fd16acfb7f9966773953de8f740a1e3202)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Xilinx 'Clocking Wizard' driver
4  *
5  *  Copyright (C) 2013 - 2021 Xilinx
6  *
7  *  Sören Brinkmann <soren.brinkmann@xilinx.com>
8  *
9  */
10 
11 #include <linux/bitfield.h>
12 #include <linux/platform_device.h>
13 #include <linux/clk.h>
14 #include <linux/clk-provider.h>
15 #include <linux/slab.h>
16 #include <linux/io.h>
17 #include <linux/of.h>
18 #include <linux/math64.h>
19 #include <linux/module.h>
20 #include <linux/overflow.h>
21 #include <linux/err.h>
22 #include <linux/iopoll.h>
23 
24 #define WZRD_NUM_OUTPUTS	7
25 #define WZRD_ACLK_MAX_FREQ	250000000UL
26 
27 #define WZRD_CLK_CFG_REG(v, n)	(0x200 + 0x130 * (v) + 4 * (n))
28 
29 #define WZRD_CLKOUT0_FRAC_EN	BIT(18)
30 #define WZRD_CLKFBOUT_1		0
31 #define WZRD_CLKFBOUT_2		1
32 #define WZRD_CLKOUT0_1		2
33 #define WZRD_CLKOUT0_2		3
34 #define WZRD_DESKEW_2		20
35 #define WZRD_DIVCLK		21
36 #define WZRD_CLKFBOUT_4		51
37 #define WZRD_CLKFBOUT_3		48
38 #define WZRD_DUTY_CYCLE		2
39 #define WZRD_O_DIV		4
40 
41 #define WZRD_CLKFBOUT_FRAC_EN	BIT(1)
42 #define WZRD_CLKFBOUT_PREDIV2	(BIT(11) | BIT(12) | BIT(9))
43 #define WZRD_MULT_PREDIV2	(BIT(10) | BIT(9) | BIT(12))
44 #define WZRD_CLKFBOUT_EDGE	BIT(8)
45 #define WZRD_P5EN		BIT(13)
46 #define WZRD_P5EN_SHIFT		13
47 #define WZRD_P5FEDGE		BIT(15)
48 #define WZRD_DIVCLK_EDGE	BIT(10)
49 #define WZRD_P5FEDGE_SHIFT	15
50 #define WZRD_CLKOUT0_PREDIV2	BIT(11)
51 #define WZRD_EDGE_SHIFT		8
52 
53 #define WZRD_CLKFBOUT_MULT_SHIFT	8
54 #define WZRD_CLKFBOUT_MULT_MASK		(0xff << WZRD_CLKFBOUT_MULT_SHIFT)
55 #define WZRD_CLKFBOUT_MULT_FRAC_MASK	GENMASK(25, 16)
56 #define WZRD_CLKFBOUT_O_MASK		GENMASK(7, 0)
57 #define WZRD_CLKFBOUT_L_SHIFT	0
58 #define WZRD_CLKFBOUT_H_SHIFT	8
59 #define WZRD_CLKFBOUT_L_MASK	GENMASK(7, 0)
60 #define WZRD_CLKFBOUT_H_MASK	GENMASK(15, 8)
61 #define WZRD_CLKFBOUT_FRAC_SHIFT	16
62 #define WZRD_CLKFBOUT_FRAC_MASK		(0x3ff << WZRD_CLKFBOUT_FRAC_SHIFT)
63 #define WZRD_VERSAL_FRAC_MASK		GENMASK(5, 0)
64 #define WZRD_DIVCLK_DIVIDE_SHIFT	0
65 #define WZRD_DIVCLK_DIVIDE_MASK		(0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
66 #define WZRD_CLKOUT_DIVIDE_SHIFT	0
67 #define WZRD_CLKOUT_DIVIDE_WIDTH	8
68 #define WZRD_CLKOUT_DIVIDE_MASK		(0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
69 #define WZRD_CLKOUT_FRAC_SHIFT		8
70 #define WZRD_CLKOUT_FRAC_MASK		0x3ff
71 #define WZRD_CLKOUT0_FRAC_MASK		GENMASK(17, 8)
72 
73 #define WZRD_DR_MAX_INT_DIV_VALUE	255
74 #define WZRD_DR_STATUS_REG_OFFSET	0x04
75 #define WZRD_DR_LOCK_BIT_MASK		0x00000001
76 #define WZRD_DR_INIT_REG_OFFSET		0x25C
77 #define WZRD_DR_INIT_VERSAL_OFFSET	0x14
78 #define WZRD_DR_DIV_TO_PHASE_OFFSET	4
79 #define WZRD_DR_BEGIN_DYNA_RECONF	0x03
80 #define WZRD_DR_BEGIN_DYNA_RECONF_5_2	0x07
81 #define WZRD_DR_BEGIN_DYNA_RECONF1_5_2	0x02
82 
83 #define WZRD_USEC_POLL		10
84 #define WZRD_TIMEOUT_POLL		1000
85 #define WZRD_FRAC_GRADIENT		64
86 #define PREDIV2_MULT			2
87 
88 /* Divider limits, from UG572 Table 3-4 for Ultrascale+ */
89 #define DIV_O				0x01
90 #define DIV_ALL				0x03
91 
92 #define WZRD_M_MIN			2ULL
93 #define WZRD_M_MAX			128ULL
94 #define WZRD_D_MIN			1ULL
95 #define WZRD_D_MAX			106ULL
96 #define WZRD_VCO_MIN			800000000ULL
97 #define WZRD_VCO_MAX			1600000000ULL
98 #define WZRD_O_MIN			2ULL
99 #define WZRD_O_MAX			128ULL
100 #define VER_WZRD_M_MIN			4
101 #define VER_WZRD_M_MAX			432
102 #define VER_WZRD_D_MIN			1
103 #define VER_WZRD_D_MAX			123
104 #define VER_WZRD_VCO_MIN		2160000000ULL
105 #define VER_WZRD_VCO_MAX		4320000000ULL
106 #define VER_WZRD_O_MIN			2
107 #define VER_WZRD_O_MAX			511
108 #define WZRD_MIN_ERR			20000
109 #define WZRD_FRAC_POINTS		1000
110 
111 /* Get the mask from width */
112 #define div_mask(width)			((1 << (width)) - 1)
113 
114 /* Extract divider instance from clock hardware instance */
115 #define to_clk_wzrd_divider(_hw) container_of(_hw, struct clk_wzrd_divider, hw)
116 
/* Indices of the driver-internal helper clocks in clk_wzrd::clks_internal */
enum clk_wzrd_int_clks {
	wzrd_clk_mul,		/* fixed-factor multiplier clock */
	wzrd_clk_mul_div,	/* divided multiplier output */
	wzrd_clk_mul_frac,	/* fractional stage of the multiplier output */
	wzrd_clk_int_max	/* number of internal clocks (array size) */
};
123 
/**
 * struct clk_wzrd - Clock wizard private data structure
 *
 * @nb:			Notifier block for rate checks on the input clocks
 * @base:		Memory base
 * @clk_in1:		Handle to input clock 'clk_in1'
 * @axi_clk:		Handle to input clock 's_axi_aclk'
 * @clks_internal:	Internal clocks, indexed by enum clk_wzrd_int_clks
 * @speed_grade:	Speed grade of the device (1-based index into
 *			clk_wzrd_max_freq)
 * @suspended:		Flag indicating power state of the device
 * @clk_data:		Output clock data; ends in a flexible array, so this
 *			member must remain last
 */
struct clk_wzrd {
	struct notifier_block nb;
	void __iomem *base;
	struct clk *clk_in1;
	struct clk *axi_clk;
	struct clk_hw *clks_internal[wzrd_clk_int_max];
	unsigned int speed_grade;
	bool suspended;
	struct clk_hw_onecell_data clk_data;
};
146 
/**
 * struct clk_wzrd_divider - clock divider specific to clk_wzrd
 *
 * @hw:		handle between common and hardware-specific interfaces
 * @base:	base address of register containing the divider
 * @offset:	offset address of register containing the divider
 * @shift:	shift to the divider bit field
 * @width:	width of the divider bit field
 * @flags:	clk_wzrd divider flags
 * @table:	array of value/divider pairs, last entry should have div = 0
 * @m:	integer part of the multiplier
 * @m_frac:	fractional part of the multiplier, in thousandths
 * @d:	value of the common divider
 * @o:	integer part of the leaf output divider
 * @o_frac:	fractional part of the leaf output divider, in thousandths
 * @lock:	register lock
 */
struct clk_wzrd_divider {
	struct clk_hw hw;
	void __iomem *base;
	u16 offset;
	u8 shift;
	u8 width;
	u8 flags;
	const struct clk_div_table *table;
	u32 m;
	u32 m_frac;
	u32 d;
	u32 o;
	u32 o_frac;
	spinlock_t *lock;  /* divider lock */
};
179 
/* Per-compatible match data: selects the Versal register-layout handling */
struct versal_clk_data {
	bool is_versal;
};
183 
#define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)

/*
 * Maximum frequencies for input/output clocks per speed grade,
 * indexed by (speed_grade - 1).
 */
static const unsigned long clk_wzrd_max_freq[] = {
	800000000UL,
	933000000UL,
	1066000000UL
};

/* spin lock variable for clk_wzrd */
static DEFINE_SPINLOCK(clkwzrd_lock);
195 
/*
 * clk_wzrd_recalc_rate_ver - Read back the rate of a Versal output divider
 * @hw: divider clock hardware
 * @parent_rate: parent clock rate in Hz
 *
 * Reassembles the effective divisor from the control word (edge, P5EN and
 * PREDIV2 bits) and the high/low counter halves in the following register.
 *
 * Return: output rate in Hz (rounded up).
 */
static unsigned long clk_wzrd_recalc_rate_ver(struct clk_hw *hw,
					      unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;
	u32 div, p5en, edge, prediv2, all;
	unsigned int vall, valh;

	/* Control bits live in the first register of the pair */
	edge = !!(readl(div_addr) & WZRD_CLKFBOUT_EDGE);
	p5en = !!(readl(div_addr) & WZRD_P5EN);
	prediv2 = !!(readl(div_addr) & WZRD_CLKOUT0_PREDIV2);
	/* Low/high counter halves live in the next register */
	vall = readl(div_addr + 4) & WZRD_CLKFBOUT_L_MASK;
	valh = readl(div_addr + 4) >> WZRD_CLKFBOUT_H_SHIFT;
	all = valh + vall + edge;
	if (!all)
		all = 1;	/* zero counters mean divide-by-one */

	/* With the pre-divider the counter value is doubled, plus P5 stage */
	if (prediv2)
		div = 2 * all + prediv2 * p5en;
	else
		div = all;

	return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}
220 
221 static unsigned long clk_wzrd_recalc_rate(struct clk_hw *hw,
222 					  unsigned long parent_rate)
223 {
224 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
225 	void __iomem *div_addr = divider->base + divider->offset;
226 	unsigned int val;
227 
228 	val = readl(div_addr) >> divider->shift;
229 	val &= div_mask(divider->width);
230 
231 	return divider_recalc_rate(hw, parent_rate, val, divider->table,
232 			divider->flags, divider->width);
233 }
234 
/*
 * clk_wzrd_ver_dynamic_reconfig - Set a new rate on a Versal output divider
 * @hw: divider clock hardware
 * @rate: requested output rate in Hz
 * @parent_rate: parent clock rate in Hz
 *
 * Splits the integer divisor across the high/low counters plus the edge,
 * P5EN and P5FEDGE control bits, then performs the dynamic reconfiguration
 * handshake and waits for the wizard to re-lock.
 *
 * Return: 0 on success, negative errno on poll timeout.
 */
static int clk_wzrd_ver_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
					 unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;
	u32 value, regh, edged, p5en, p5fedge, regval, regval1;
	unsigned long flags;
	int err;

	spin_lock_irqsave(divider->lock, flags);

	value = DIV_ROUND_CLOSEST(parent_rate, rate);

	/* Each counter half gets value/4; PREDIV2 and P5 make up the rest */
	regh = (value / 4);
	regval1 = readl(div_addr);
	regval1 |= WZRD_CLKFBOUT_PREDIV2;
	regval1 = regval1 & ~(WZRD_CLKFBOUT_EDGE | WZRD_P5EN | WZRD_P5FEDGE);
	/* Remainder of 2 or 3: extend the high time via the edge bit */
	if (value % 4 > 1) {
		edged = 1;
		regval1 |= (edged << WZRD_EDGE_SHIFT);
	}
	/*
	 * Odd divisors additionally enable the P5 stage; presumably this
	 * adds the final half-cycle - confirm against the wizard spec.
	 */
	p5fedge = value % 2;
	p5en = value % 2;
	regval1 = regval1 | p5en << WZRD_P5EN_SHIFT | p5fedge << WZRD_P5FEDGE_SHIFT;
	writel(regval1, div_addr);

	/* Identical high and low counter halves */
	regval = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
	writel(regval, div_addr + 4);
	/* Check status register */
	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
					value, value & WZRD_DR_LOCK_BIT_MASK,
					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
	if (err)
		goto err_reconfig;

	/* Initiate reconfiguration */
	writel(WZRD_DR_BEGIN_DYNA_RECONF,
	       divider->base + WZRD_DR_INIT_VERSAL_OFFSET);

	/* Check status register */
	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
					value, value & WZRD_DR_LOCK_BIT_MASK,
					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
err_reconfig:
	spin_unlock_irqrestore(divider->lock, flags);
	return err;
}
282 
283 static int clk_wzrd_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
284 				     unsigned long parent_rate)
285 {
286 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
287 	void __iomem *div_addr = divider->base + divider->offset;
288 	unsigned long flags;
289 	u32 value;
290 	int err;
291 
292 	spin_lock_irqsave(divider->lock, flags);
293 
294 	value = DIV_ROUND_CLOSEST(parent_rate, rate);
295 
296 	/* Cap the value to max */
297 	min_t(u32, value, WZRD_DR_MAX_INT_DIV_VALUE);
298 
299 	/* Set divisor and clear phase offset */
300 	writel(value, div_addr);
301 	writel(0x00, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);
302 
303 	/* Check status register */
304 	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
305 					value, value & WZRD_DR_LOCK_BIT_MASK,
306 					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
307 	if (err)
308 		goto err_reconfig;
309 
310 	/* Initiate reconfiguration */
311 	writel(WZRD_DR_BEGIN_DYNA_RECONF_5_2,
312 	       divider->base + WZRD_DR_INIT_REG_OFFSET);
313 	writel(WZRD_DR_BEGIN_DYNA_RECONF1_5_2,
314 	       divider->base + WZRD_DR_INIT_REG_OFFSET);
315 
316 	/* Check status register */
317 	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
318 					value, value & WZRD_DR_LOCK_BIT_MASK,
319 					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
320 err_reconfig:
321 	spin_unlock_irqrestore(divider->lock, flags);
322 	return err;
323 }
324 
325 static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
326 				unsigned long *prate)
327 {
328 	u8 div;
329 
330 	/*
331 	 * since we don't change parent rate we just round rate to closest
332 	 * achievable
333 	 */
334 	div = DIV_ROUND_CLOSEST(*prate, rate);
335 
336 	return *prate / div;
337 }
338 
339 static int clk_wzrd_get_divisors_ver(struct clk_hw *hw, unsigned long rate,
340 				     unsigned long parent_rate)
341 {
342 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
343 	u64 vco_freq, freq, diff, vcomin, vcomax;
344 	u32 m, d, o;
345 	u32 mmin, mmax, dmin, dmax, omin, omax;
346 
347 	mmin = VER_WZRD_M_MIN;
348 	mmax = VER_WZRD_M_MAX;
349 	dmin = VER_WZRD_D_MIN;
350 	dmax = VER_WZRD_D_MAX;
351 	omin = VER_WZRD_O_MIN;
352 	omax = VER_WZRD_O_MAX;
353 	vcomin = VER_WZRD_VCO_MIN;
354 	vcomax = VER_WZRD_VCO_MAX;
355 
356 	for (m = mmin; m <= mmax; m++) {
357 		for (d = dmin; d <= dmax; d++) {
358 			vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
359 			if (vco_freq >= vcomin && vco_freq <= vcomax) {
360 				for (o = omin; o <= omax; o++) {
361 					freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
362 					diff = abs(freq - rate);
363 
364 					if (diff < WZRD_MIN_ERR) {
365 						divider->m = m;
366 						divider->d = d;
367 						divider->o = o;
368 						return 0;
369 					}
370 				}
371 			}
372 		}
373 	}
374 	return -EBUSY;
375 }
376 
/*
 * clk_wzrd_get_divisors - Search M/D/O divisors for a rate (Ultrascale+)
 * @hw: divider clock hardware
 * @rate: requested output rate in Hz
 * @parent_rate: input clock rate in Hz
 *
 * M and O support fractions in 1/8 steps, so they (and the VCO limits) are
 * scaled by 8 (<< 3) for the search. The best match is stored in the
 * clk_wzrd_divider: integer parts in m/d/o, fractional parts converted to
 * thousandths (eighths * 125) in m_frac/o_frac.
 *
 * Return: 0 when the best match is within WZRD_MIN_ERR, -EBUSY otherwise.
 */
static int clk_wzrd_get_divisors(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	u64 vco_freq, freq, diff, vcomin, vcomax, best_diff = -1ULL;
	u64 m, d, o;
	u64 mmin, mmax, dmin, dmax, omin, omax, mdmin, mdmax;

	mmin = WZRD_M_MIN << 3;
	mmax = WZRD_M_MAX << 3;
	dmin = WZRD_D_MIN;
	dmax = WZRD_D_MAX;
	omin = WZRD_O_MIN << 3;
	omax = WZRD_O_MAX << 3;
	vcomin = WZRD_VCO_MIN << 3;
	vcomax = WZRD_VCO_MAX << 3;

	for (m = mmin; m <= mmax; m++) {
		/* Restrict D so the VCO stays within [vcomin, vcomax] */
		mdmin = max(dmin, div64_u64(parent_rate * m + vcomax / 2, vcomax));
		mdmax = min(dmax, div64_u64(parent_rate * m + vcomin / 2, vcomin));
		for (d = mdmin; d <= mdmax; d++) {
			vco_freq = DIV_ROUND_CLOSEST_ULL((parent_rate * m), d);
			o = DIV_ROUND_CLOSEST_ULL(vco_freq, rate);
			if (o < omin || o > omax)
				continue;
			freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
			/*
			 * NOTE(review): freq - rate wraps to a huge value
			 * when freq < rate, which effectively rejects
			 * candidates below the requested rate - confirm that
			 * is intended rather than abs_diff().
			 */
			diff = freq - rate;
			if (diff < best_diff) {
				best_diff = diff;
				divider->m = m >> 3;
				divider->m_frac = (m - (divider->m << 3)) * 125;
				divider->d = d;
				divider->o = o >> 3;
				divider->o_frac = (o - (divider->o << 3)) * 125;
			}
		}
	}
	return best_diff < WZRD_MIN_ERR ? 0 : -EBUSY;
}
416 
417 static int clk_wzrd_reconfig(struct clk_wzrd_divider *divider, void __iomem *div_addr)
418 {
419 	u32 value;
420 	int err;
421 
422 	/* Check status register */
423 	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
424 					value & WZRD_DR_LOCK_BIT_MASK,
425 					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
426 	if (err)
427 		return -ETIMEDOUT;
428 
429 	/* Initiate reconfiguration */
430 	writel(WZRD_DR_BEGIN_DYNA_RECONF, div_addr);
431 	/* Check status register */
432 	return readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
433 				 value & WZRD_DR_LOCK_BIT_MASK,
434 				 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
435 }
436 
/*
 * clk_wzrd_dynamic_ver_all_nolock - Reprogram M, D and O together (Versal)
 * @hw: divider clock hardware
 * @rate: requested output rate in Hz
 * @parent_rate: input clock rate in Hz
 *
 * Computes new divisors via clk_wzrd_get_divisors_ver(), writes the
 * multiplier (CLKFBOUT), common divider (DIVCLK) and output divider
 * (CLKOUT0) register pairs, then runs the reconfiguration handshake.
 * Caller must hold divider->lock.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int clk_wzrd_dynamic_ver_all_nolock(struct clk_hw *hw, unsigned long rate,
					   unsigned long parent_rate)
{
	u32 regh, edged, p5en, p5fedge, value2, m, regval, regval1, value;
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr;
	int err;

	err = clk_wzrd_get_divisors_ver(hw, rate, parent_rate);
	if (err)
		return err;

	/* Clear CLKFBOUT_4, which holds the fractional enable bit */
	writel(0, divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_4));

	/* Multiplier M: split across high/low counters, odd M via edge bit */
	m = divider->m;
	edged = m % WZRD_DUTY_CYCLE;
	regh = m / WZRD_DUTY_CYCLE;
	regval1 = readl(divider->base + WZRD_CLK_CFG_REG(1,
							 WZRD_CLKFBOUT_1));
	regval1 |= WZRD_MULT_PREDIV2;
	if (edged)
		regval1 = regval1 | WZRD_CLKFBOUT_EDGE;
	else
		regval1 = regval1 & ~WZRD_CLKFBOUT_EDGE;

	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
							 WZRD_CLKFBOUT_1));
	regval1 = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
							 WZRD_CLKFBOUT_2));

	/* Common divider D: same split, odd D signalled by the DIVCLK edge */
	value2 = divider->d;
	edged = value2 % WZRD_DUTY_CYCLE;
	regh = (value2 / WZRD_DUTY_CYCLE);
	regval1 = FIELD_PREP(WZRD_DIVCLK_EDGE, edged);
	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
							 WZRD_DESKEW_2));
	regval1 = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1, WZRD_DIVCLK));

	/* Output divider O: PREDIV2 path; remainder goes to edge/P5 bits */
	value = divider->o;
	regh = value / WZRD_O_DIV;
	regval1 = readl(divider->base + WZRD_CLK_CFG_REG(1,
							 WZRD_CLKOUT0_1));
	regval1 |= WZRD_CLKFBOUT_PREDIV2;
	regval1 = regval1 & ~(WZRD_CLKFBOUT_EDGE | WZRD_P5EN | WZRD_P5FEDGE);

	if (value % WZRD_O_DIV > 1) {
		edged = 1;
		/* WZRD_CLKFBOUT_H_SHIFT (8) is also the edge bit position */
		regval1 |= edged << WZRD_CLKFBOUT_H_SHIFT;
	}

	/* Odd divisors need the P5 stage, as in the single-divider path */
	p5fedge = value % WZRD_DUTY_CYCLE;
	p5en = value % WZRD_DUTY_CYCLE;

	regval1 = regval1 | FIELD_PREP(WZRD_P5EN, p5en) | FIELD_PREP(WZRD_P5FEDGE, p5fedge);
	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
							 WZRD_CLKOUT0_1));
	regval = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
	writel(regval, divider->base + WZRD_CLK_CFG_REG(1,
							WZRD_CLKOUT0_2));
	div_addr = divider->base + WZRD_DR_INIT_VERSAL_OFFSET;

	return clk_wzrd_reconfig(divider, div_addr);
}
502 
/*
 * clk_wzrd_dynamic_all_nolock - Reprogram M, D and O together (Ultrascale+)
 * @hw: divider clock hardware
 * @rate: requested output rate in Hz
 * @parent_rate: input clock rate in Hz
 *
 * Caller must hold divider->lock.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int clk_wzrd_dynamic_all_nolock(struct clk_hw *hw, unsigned long rate,
				       unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr;
	u32 reg;
	int err;

	err = clk_wzrd_get_divisors(hw, rate, parent_rate);
	if (err)
		return err;

	/* Output divider O plus its fractional part (in thousandths) */
	reg = FIELD_PREP(WZRD_CLKOUT_DIVIDE_MASK, divider->o) |
	      FIELD_PREP(WZRD_CLKOUT0_FRAC_MASK, divider->o_frac);

	writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 2));
	/* Multiplier M (+fraction) and common divider D share register 0 */
	reg = FIELD_PREP(WZRD_CLKFBOUT_MULT_MASK, divider->m) |
	      FIELD_PREP(WZRD_CLKFBOUT_MULT_FRAC_MASK, divider->m_frac) |
	      FIELD_PREP(WZRD_DIVCLK_DIVIDE_MASK, divider->d);
	writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 0));
	/* NOTE(review): register 3 is cleared here - presumably phase/duty */
	writel(0, divider->base + WZRD_CLK_CFG_REG(0, 3));
	div_addr = divider->base + WZRD_DR_INIT_REG_OFFSET;
	return clk_wzrd_reconfig(divider, div_addr);
}
527 
528 static int clk_wzrd_dynamic_all(struct clk_hw *hw, unsigned long rate,
529 				unsigned long parent_rate)
530 {
531 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
532 	unsigned long flags;
533 	int ret;
534 
535 	spin_lock_irqsave(divider->lock, flags);
536 
537 	ret = clk_wzrd_dynamic_all_nolock(hw, rate, parent_rate);
538 
539 	spin_unlock_irqrestore(divider->lock, flags);
540 
541 	return ret;
542 }
543 
544 static int clk_wzrd_dynamic_all_ver(struct clk_hw *hw, unsigned long rate,
545 				    unsigned long parent_rate)
546 {
547 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
548 	unsigned long flags;
549 	int ret;
550 
551 	spin_lock_irqsave(divider->lock, flags);
552 
553 	ret = clk_wzrd_dynamic_ver_all_nolock(hw, rate, parent_rate);
554 
555 	spin_unlock_irqrestore(divider->lock, flags);
556 
557 	return ret;
558 }
559 
/*
 * clk_wzrd_recalc_rate_all - Read back the rate of the all-divider clock
 * @hw: divider clock hardware
 * @parent_rate: input clock rate in Hz
 *
 * rate = parent * (m + mf/1000) / (d * (o + f/1000)), computed in
 * thousandths to avoid floating point.
 *
 * Return: output rate in Hz.
 */
static unsigned long clk_wzrd_recalc_rate_all(struct clk_hw *hw,
					      unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	u32 m, d, o, reg, f, mf;
	u64 mul;

	/* Register 0: common divider D, multiplier M and its fraction */
	reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 0));
	d = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
	m = FIELD_GET(WZRD_CLKFBOUT_MULT_MASK, reg);
	mf = FIELD_GET(WZRD_CLKFBOUT_MULT_FRAC_MASK, reg);
	/* Register 2: output divider O (same field layout as D) + fraction */
	reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 2));
	o = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
	f = FIELD_GET(WZRD_CLKOUT0_FRAC_MASK, reg);

	mul = m * 1000 + mf;
	return DIV_ROUND_CLOSEST_ULL(parent_rate * mul, d * (o * 1000 + f));
}
578 
/*
 * clk_wzrd_recalc_rate_all_ver - Read back the all-divider rate (Versal)
 * @hw: divider clock hardware
 * @parent_rate: input clock rate in Hz
 *
 * Reconstructs multiplier M (with optional fractional part), output divider
 * O (with PREDIV2/P5) and common divider D from the CLKFBOUT, CLKOUT0 and
 * DIVCLK register pairs.
 *
 * Return: output rate in Hz.
 */
static unsigned long clk_wzrd_recalc_rate_all_ver(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	u32 edged, div2, p5en, edge, prediv2, all, regl, regh, mult;
	u32 div, reg;

	/* M calculation: edge bit plus high/low counter halves */
	edge = !!(readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_1)) &
			WZRD_CLKFBOUT_EDGE);

	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_2));
	regl = FIELD_GET(WZRD_CLKFBOUT_L_MASK, reg);
	regh = FIELD_GET(WZRD_CLKFBOUT_H_MASK, reg);

	mult = regl + regh + edge;
	if (!mult)
		mult = 1;	/* zero counters mean multiply-by-one */

	/* Apply the fractional multiplier part (1/64 steps) if enabled */
	regl = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_4)) &
		     WZRD_CLKFBOUT_FRAC_EN;
	if (regl) {
		regl = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_3))
				& WZRD_VERSAL_FRAC_MASK;
		mult = mult * WZRD_FRAC_GRADIENT + regl;
		parent_rate = DIV_ROUND_CLOSEST((parent_rate * mult), WZRD_FRAC_GRADIENT);
	} else {
		parent_rate = parent_rate * mult;
	}

	/* O Calculation */
	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKOUT0_1));
	edged = FIELD_GET(WZRD_CLKFBOUT_EDGE, reg);
	p5en = FIELD_GET(WZRD_P5EN, reg);
	prediv2 = FIELD_GET(WZRD_CLKOUT0_PREDIV2, reg);

	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKOUT0_2));
	/* Low time */
	regl = FIELD_GET(WZRD_CLKFBOUT_L_MASK, reg);
	/* High time */
	regh = FIELD_GET(WZRD_CLKFBOUT_H_MASK, reg);
	all = regh + regl + edged;
	if (!all)
		all = 1;

	/* PREDIV2 doubles the counter value; P5 adds the odd half-step */
	if (prediv2)
		div2 = PREDIV2_MULT * all + p5en;
	else
		div2 = all;

	/* D calculation */
	edged = !!(readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_DESKEW_2)) &
		     WZRD_DIVCLK_EDGE);
	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_DIVCLK));
	/* Low time */
	regl = FIELD_GET(WZRD_CLKFBOUT_L_MASK, reg);
	/* High time */
	regh = FIELD_GET(WZRD_CLKFBOUT_H_MASK, reg);
	div = regl + regh + edged;
	if (!div)
		div = 1;

	/* Total division is D * O */
	div = div * div2;
	return divider_recalc_rate(hw, parent_rate, div, divider->table,
			divider->flags, divider->width);
}
644 
645 static long clk_wzrd_round_rate_all(struct clk_hw *hw, unsigned long rate,
646 				    unsigned long *prate)
647 {
648 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
649 	u32 m, d, o;
650 	int err;
651 
652 	err = clk_wzrd_get_divisors(hw, rate, *prate);
653 	if (err)
654 		return err;
655 
656 	m = divider->m;
657 	d = divider->d;
658 	o = divider->o;
659 
660 	rate = div_u64(*prate * (m * 1000 + divider->m_frac), d * (o * 1000 + divider->o_frac));
661 	return rate;
662 }
663 
/*
 * clk_wzrd_ver_round_rate_all - Round rate for the all-divider clock (Versal)
 * @hw: divider clock hardware
 * @rate: requested output rate in Hz
 * @prate: parent rate in Hz (not modified)
 *
 * NOTE(review): this calls clk_wzrd_get_divisors() (Ultrascale+ limits,
 * 1/8-step fixed point) rather than clk_wzrd_get_divisors_ver() - confirm
 * that is intended for Versal.
 *
 * Return: achievable rate in Hz, or a negative errno if no divisors fit.
 */
static long clk_wzrd_ver_round_rate_all(struct clk_hw *hw, unsigned long rate,
					unsigned long *prate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	unsigned long int_freq;
	u32 m, d, o, div, f;
	int err;

	err = clk_wzrd_get_divisors(hw, rate, *prate);
	if (err)
		return err;

	m = divider->m;
	d = divider->d;
	o = divider->o;

	/* Rate achievable with the integer divisors alone */
	div = d * o;
	int_freq =  divider_recalc_rate(hw, *prate * m, div, divider->table,
					divider->flags, divider->width);

	/* Quantize any excess over the integer rate to WZRD_FRAC_POINTS */
	if (rate > int_freq) {
		f = DIV_ROUND_CLOSEST_ULL(rate * WZRD_FRAC_POINTS, int_freq);
		rate = DIV_ROUND_CLOSEST(int_freq * f, WZRD_FRAC_POINTS);
	}
	return rate;
}
690 
/* Versal: single output divider (O only) */
static const struct clk_ops clk_wzrd_ver_divider_ops = {
	.round_rate = clk_wzrd_round_rate,
	.set_rate = clk_wzrd_ver_dynamic_reconfig,
	.recalc_rate = clk_wzrd_recalc_rate_ver,
};

/* Versal: reprogram M, D and O together (single-output configuration) */
static const struct clk_ops clk_wzrd_ver_div_all_ops = {
	.round_rate = clk_wzrd_ver_round_rate_all,
	.set_rate = clk_wzrd_dynamic_all_ver,
	.recalc_rate = clk_wzrd_recalc_rate_all_ver,
};

/* Ultrascale+: single integer output divider (O only) */
static const struct clk_ops clk_wzrd_clk_divider_ops = {
	.round_rate = clk_wzrd_round_rate,
	.set_rate = clk_wzrd_dynamic_reconfig,
	.recalc_rate = clk_wzrd_recalc_rate,
};

/* Ultrascale+: reprogram M, D and O together (single-output configuration) */
static const struct clk_ops clk_wzrd_clk_div_all_ops = {
	.round_rate = clk_wzrd_round_rate_all,
	.set_rate = clk_wzrd_dynamic_all,
	.recalc_rate = clk_wzrd_recalc_rate_all,
};
714 
715 static unsigned long clk_wzrd_recalc_ratef(struct clk_hw *hw,
716 					   unsigned long parent_rate)
717 {
718 	unsigned int val;
719 	u32 div, frac;
720 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
721 	void __iomem *div_addr = divider->base + divider->offset;
722 
723 	val = readl(div_addr);
724 	div = val & div_mask(divider->width);
725 	frac = (val >> WZRD_CLKOUT_FRAC_SHIFT) & WZRD_CLKOUT_FRAC_MASK;
726 
727 	return mult_frac(parent_rate, 1000, (div * 1000) + frac);
728 }
729 
/*
 * clk_wzrd_dynamic_reconfig_f - Set a new rate on a fractional divider
 * @hw: divider clock hardware
 * @rate: requested output rate in Hz
 * @parent_rate: parent clock rate in Hz
 *
 * Computes an integer divisor plus a fractional part in thousandths,
 * programs both and runs the two-step reconfiguration handshake.
 *
 * NOTE(review): unlike the integer paths, this neither takes divider->lock
 * nor uses the atomic poll variant - confirm callers cannot race here.
 *
 * Return: 0 on success, negative errno on poll timeout.
 */
static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
				       unsigned long parent_rate)
{
	int err;
	u32 value, pre;
	unsigned long rate_div, f, clockout0_div;
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;

	/* Integer part of parent/rate, in thousandths */
	rate_div = DIV_ROUND_DOWN_ULL(parent_rate * 1000, rate);
	clockout0_div = rate_div / 1000;

	/* Fractional remainder, packed above the integer divider field */
	pre = DIV_ROUND_CLOSEST((parent_rate * 1000), rate);
	f = (u32)(pre - (clockout0_div * 1000));
	f = f & WZRD_CLKOUT_FRAC_MASK;
	f = f << WZRD_CLKOUT_DIVIDE_WIDTH;

	value = (f  | (clockout0_div & WZRD_CLKOUT_DIVIDE_MASK));

	/* Set divisor and clear phase offset */
	writel(value, div_addr);
	writel(0x0, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);

	/* Wait for lock before initiating reconfiguration */
	err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
				 value & WZRD_DR_LOCK_BIT_MASK,
				 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
	if (err)
		return err;

	/* Initiate reconfiguration (two-step sequence for wizard 5.2) */
	writel(WZRD_DR_BEGIN_DYNA_RECONF_5_2,
	       divider->base + WZRD_DR_INIT_REG_OFFSET);
	writel(WZRD_DR_BEGIN_DYNA_RECONF1_5_2,
	       divider->base + WZRD_DR_INIT_REG_OFFSET);

	/* Wait for the wizard to lock on the new configuration */
	return readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
				value & WZRD_DR_LOCK_BIT_MASK,
				WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
}
771 
/*
 * The fractional divider can approximate arbitrary rates, so accept the
 * request unchanged; set_rate computes the closest divider and fraction.
 */
static long clk_wzrd_round_rate_f(struct clk_hw *hw, unsigned long rate,
				  unsigned long *prate)
{
	return rate;
}
777 
/* Ultrascale+: fractional output divider */
static const struct clk_ops clk_wzrd_clk_divider_ops_f = {
	.round_rate = clk_wzrd_round_rate_f,
	.set_rate = clk_wzrd_dynamic_reconfig_f,
	.recalc_rate = clk_wzrd_recalc_ratef,
};
783 
784 static struct clk_hw *clk_wzrd_register_divf(struct device *dev,
785 					  const char *name,
786 					  const char *parent_name,
787 					  unsigned long flags,
788 					  void __iomem *base, u16 offset,
789 					  u8 shift, u8 width,
790 					  u8 clk_divider_flags,
791 					  u32 div_type,
792 					  spinlock_t *lock)
793 {
794 	struct clk_wzrd_divider *div;
795 	struct clk_hw *hw;
796 	struct clk_init_data init;
797 	int ret;
798 
799 	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
800 	if (!div)
801 		return ERR_PTR(-ENOMEM);
802 
803 	init.name = name;
804 
805 	init.ops = &clk_wzrd_clk_divider_ops_f;
806 
807 	init.flags = flags;
808 	init.parent_names = &parent_name;
809 	init.num_parents = 1;
810 
811 	div->base = base;
812 	div->offset = offset;
813 	div->shift = shift;
814 	div->width = width;
815 	div->flags = clk_divider_flags;
816 	div->lock = lock;
817 	div->hw.init = &init;
818 
819 	hw = &div->hw;
820 	ret =  devm_clk_hw_register(dev, hw);
821 	if (ret)
822 		return ERR_PTR(ret);
823 
824 	return hw;
825 }
826 
827 static struct clk_hw *clk_wzrd_ver_register_divider(struct device *dev,
828 						 const char *name,
829 						 const char *parent_name,
830 						 unsigned long flags,
831 						 void __iomem *base,
832 						 u16 offset,
833 						 u8 shift, u8 width,
834 						 u8 clk_divider_flags,
835 						 u32 div_type,
836 						 spinlock_t *lock)
837 {
838 	struct clk_wzrd_divider *div;
839 	struct clk_hw *hw;
840 	struct clk_init_data init;
841 	int ret;
842 
843 	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
844 	if (!div)
845 		return ERR_PTR(-ENOMEM);
846 
847 	init.name = name;
848 	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
849 		init.ops = &clk_divider_ro_ops;
850 	else if (div_type == DIV_O)
851 		init.ops = &clk_wzrd_ver_divider_ops;
852 	else
853 		init.ops = &clk_wzrd_ver_div_all_ops;
854 	init.flags = flags;
855 	init.parent_names =  &parent_name;
856 	init.num_parents =  1;
857 
858 	div->base = base;
859 	div->offset = offset;
860 	div->shift = shift;
861 	div->width = width;
862 	div->flags = clk_divider_flags;
863 	div->lock = lock;
864 	div->hw.init = &init;
865 
866 	hw = &div->hw;
867 	ret = devm_clk_hw_register(dev, hw);
868 	if (ret)
869 		return ERR_PTR(ret);
870 
871 	return hw;
872 }
873 
874 static struct clk_hw *clk_wzrd_register_divider(struct device *dev,
875 					     const char *name,
876 					     const char *parent_name,
877 					     unsigned long flags,
878 					     void __iomem *base, u16 offset,
879 					     u8 shift, u8 width,
880 					     u8 clk_divider_flags,
881 					     u32 div_type,
882 					     spinlock_t *lock)
883 {
884 	struct clk_wzrd_divider *div;
885 	struct clk_hw *hw;
886 	struct clk_init_data init;
887 	int ret;
888 
889 	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
890 	if (!div)
891 		return ERR_PTR(-ENOMEM);
892 
893 	init.name = name;
894 	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
895 		init.ops = &clk_divider_ro_ops;
896 	else if (div_type == DIV_O)
897 		init.ops = &clk_wzrd_clk_divider_ops;
898 	else
899 		init.ops = &clk_wzrd_clk_div_all_ops;
900 	init.flags = flags;
901 	init.parent_names =  &parent_name;
902 	init.num_parents =  1;
903 
904 	div->base = base;
905 	div->offset = offset;
906 	div->shift = shift;
907 	div->width = width;
908 	div->flags = clk_divider_flags;
909 	div->lock = lock;
910 	div->hw.init = &init;
911 
912 	hw = &div->hw;
913 	ret = devm_clk_hw_register(dev, hw);
914 	if (ret)
915 		return ERR_PTR(ret);
916 
917 	return hw;
918 }
919 
920 static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
921 				 void *data)
922 {
923 	unsigned long max;
924 	struct clk_notifier_data *ndata = data;
925 	struct clk_wzrd *clk_wzrd = to_clk_wzrd(nb);
926 
927 	if (clk_wzrd->suspended)
928 		return NOTIFY_OK;
929 
930 	if (ndata->clk == clk_wzrd->clk_in1)
931 		max = clk_wzrd_max_freq[clk_wzrd->speed_grade - 1];
932 	else if (ndata->clk == clk_wzrd->axi_clk)
933 		max = WZRD_ACLK_MAX_FREQ;
934 	else
935 		return NOTIFY_DONE;	/* should never happen */
936 
937 	switch (event) {
938 	case PRE_RATE_CHANGE:
939 		if (ndata->new_rate > max)
940 			return NOTIFY_BAD;
941 		return NOTIFY_OK;
942 	case POST_RATE_CHANGE:
943 	case ABORT_RATE_CHANGE:
944 	default:
945 		return NOTIFY_DONE;
946 	}
947 }
948 
949 static int __maybe_unused clk_wzrd_suspend(struct device *dev)
950 {
951 	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
952 
953 	clk_disable_unprepare(clk_wzrd->axi_clk);
954 	clk_wzrd->suspended = true;
955 
956 	return 0;
957 }
958 
959 static int __maybe_unused clk_wzrd_resume(struct device *dev)
960 {
961 	int ret;
962 	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
963 
964 	ret = clk_prepare_enable(clk_wzrd->axi_clk);
965 	if (ret) {
966 		dev_err(dev, "unable to enable s_axi_aclk\n");
967 		return ret;
968 	}
969 
970 	clk_wzrd->suspended = false;
971 
972 	return 0;
973 }
974 
975 static SIMPLE_DEV_PM_OPS(clk_wzrd_dev_pm_ops, clk_wzrd_suspend,
976 			 clk_wzrd_resume);
977 
/* Match data: mark the instance as a Versal clocking wizard */
static const struct versal_clk_data versal_data = {
	.is_versal	= true,
};
981 
982 static int clk_wzrd_register_output_clocks(struct device *dev, int nr_outputs)
983 {
984 	const char *clkout_name, *clk_name, *clk_mul_name;
985 	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
986 	u32 regl, regh, edge, regld, reghd, edged, div;
987 	const struct versal_clk_data *data;
988 	unsigned long flags = 0;
989 	bool is_versal = false;
990 	void __iomem *ctrl_reg;
991 	u32 reg, reg_f, mult;
992 	int i;
993 
994 	data = device_get_match_data(dev);
995 	if (data)
996 		is_versal = data->is_versal;
997 
998 	clkout_name = devm_kasprintf(dev, GFP_KERNEL, "%s_out0", dev_name(dev));
999 	if (!clkout_name)
1000 		return -ENOMEM;
1001 
1002 	if (is_versal) {
1003 		if (nr_outputs == 1) {
1004 			clk_wzrd->clk_data.hws[0] = clk_wzrd_ver_register_divider
1005 				(dev, clkout_name,
1006 				__clk_get_name(clk_wzrd->clk_in1), 0,
1007 				clk_wzrd->base, WZRD_CLK_CFG_REG(is_versal, 3),
1008 				WZRD_CLKOUT_DIVIDE_SHIFT,
1009 				WZRD_CLKOUT_DIVIDE_WIDTH,
1010 				CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
1011 				DIV_ALL, &clkwzrd_lock);
1012 
1013 			return 0;
1014 		}
1015 		/* register multiplier */
1016 		edge = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0)) &
1017 				BIT(8));
1018 		regl = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 1)) &
1019 			     WZRD_CLKFBOUT_L_MASK) >> WZRD_CLKFBOUT_L_SHIFT;
1020 		regh = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 1)) &
1021 			     WZRD_CLKFBOUT_H_MASK) >> WZRD_CLKFBOUT_H_SHIFT;
1022 		mult = regl + regh + edge;
1023 		if (!mult)
1024 			mult = 1;
1025 		mult = mult * WZRD_FRAC_GRADIENT;
1026 
1027 		regl = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 51)) &
1028 			     WZRD_CLKFBOUT_FRAC_EN;
1029 		if (regl) {
1030 			regl = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 48)) &
1031 				WZRD_VERSAL_FRAC_MASK;
1032 			mult = mult + regl;
1033 		}
1034 		div = 64;
1035 	} else {
1036 		if (nr_outputs == 1) {
1037 			clk_wzrd->clk_data.hws[0] = clk_wzrd_register_divider
1038 				(dev, clkout_name,
1039 				__clk_get_name(clk_wzrd->clk_in1), 0,
1040 				clk_wzrd->base, WZRD_CLK_CFG_REG(is_versal, 3),
1041 				WZRD_CLKOUT_DIVIDE_SHIFT,
1042 				WZRD_CLKOUT_DIVIDE_WIDTH,
1043 				CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
1044 				DIV_ALL, &clkwzrd_lock);
1045 
1046 			return 0;
1047 		}
1048 		reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0));
1049 		reg_f = reg & WZRD_CLKFBOUT_FRAC_MASK;
1050 		reg_f =  reg_f >> WZRD_CLKFBOUT_FRAC_SHIFT;
1051 
1052 		reg = reg & WZRD_CLKFBOUT_MULT_MASK;
1053 		reg =  reg >> WZRD_CLKFBOUT_MULT_SHIFT;
1054 		mult = (reg * 1000) + reg_f;
1055 		div = 1000;
1056 	}
1057 	clk_name = devm_kasprintf(dev, GFP_KERNEL, "%s_mul", dev_name(dev));
1058 	if (!clk_name)
1059 		return -ENOMEM;
1060 	clk_wzrd->clks_internal[wzrd_clk_mul] = devm_clk_hw_register_fixed_factor
1061 			(dev, clk_name,
1062 			 __clk_get_name(clk_wzrd->clk_in1),
1063 			0, mult, div);
1064 	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
1065 		dev_err(dev, "unable to register fixed-factor clock\n");
1066 		return PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
1067 	}
1068 
1069 	clk_name = devm_kasprintf(dev, GFP_KERNEL, "%s_mul_div", dev_name(dev));
1070 	if (!clk_name)
1071 		return -ENOMEM;
1072 
1073 	if (is_versal) {
1074 		edged = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 20)) &
1075 			     BIT(10));
1076 		regld = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 21)) &
1077 			     WZRD_CLKFBOUT_L_MASK) >> WZRD_CLKFBOUT_L_SHIFT;
1078 		reghd = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 21)) &
1079 		     WZRD_CLKFBOUT_H_MASK) >> WZRD_CLKFBOUT_H_SHIFT;
1080 		div = (regld  + reghd + edged);
1081 		if (!div)
1082 			div = 1;
1083 
1084 		clk_mul_name = clk_hw_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]);
1085 		clk_wzrd->clks_internal[wzrd_clk_mul_div] =
1086 			devm_clk_hw_register_fixed_factor(dev, clk_name, clk_mul_name, 0, 1, div);
1087 	} else {
1088 		ctrl_reg = clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0);
1089 		clk_wzrd->clks_internal[wzrd_clk_mul_div] = devm_clk_hw_register_divider
1090 			(dev, clk_name,
1091 			 clk_hw_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
1092 			flags, ctrl_reg, 0, 8, CLK_DIVIDER_ONE_BASED |
1093 			CLK_DIVIDER_ALLOW_ZERO, &clkwzrd_lock);
1094 	}
1095 	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) {
1096 		dev_err(dev, "unable to register divider clock\n");
1097 		return PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
1098 	}
1099 
1100 	/* register div per output */
1101 	for (i = nr_outputs - 1; i >= 0 ; i--) {
1102 		clkout_name = devm_kasprintf(dev, GFP_KERNEL, "%s_out%d", dev_name(dev), i);
1103 		if (!clkout_name)
1104 			return -ENOMEM;
1105 
1106 		if (is_versal) {
1107 			clk_wzrd->clk_data.hws[i] = clk_wzrd_ver_register_divider
1108 						(dev,
1109 						 clkout_name, clk_name, 0,
1110 						 clk_wzrd->base,
1111 						 (WZRD_CLK_CFG_REG(is_versal, 3) + i * 8),
1112 						 WZRD_CLKOUT_DIVIDE_SHIFT,
1113 						 WZRD_CLKOUT_DIVIDE_WIDTH,
1114 						 CLK_DIVIDER_ONE_BASED |
1115 						 CLK_DIVIDER_ALLOW_ZERO,
1116 						 DIV_O, &clkwzrd_lock);
1117 		} else {
1118 			if (!i)
1119 				clk_wzrd->clk_data.hws[i] = clk_wzrd_register_divf
1120 					(dev, clkout_name, clk_name, flags, clk_wzrd->base,
1121 					(WZRD_CLK_CFG_REG(is_versal, 2) + i * 12),
1122 					WZRD_CLKOUT_DIVIDE_SHIFT,
1123 					WZRD_CLKOUT_DIVIDE_WIDTH,
1124 					CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
1125 					DIV_O, &clkwzrd_lock);
1126 			else
1127 				clk_wzrd->clk_data.hws[i] = clk_wzrd_register_divider
1128 					(dev, clkout_name, clk_name, 0, clk_wzrd->base,
1129 					(WZRD_CLK_CFG_REG(is_versal, 2) + i * 12),
1130 					WZRD_CLKOUT_DIVIDE_SHIFT,
1131 					WZRD_CLKOUT_DIVIDE_WIDTH,
1132 					CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
1133 					DIV_O, &clkwzrd_lock);
1134 		}
1135 		if (IS_ERR(clk_wzrd->clk_data.hws[i])) {
1136 			dev_err(dev, "unable to register divider clock\n");
1137 			return PTR_ERR(clk_wzrd->clk_data.hws[i]);
1138 		}
1139 	}
1140 
1141 	return 0;
1142 }
1143 
1144 static int clk_wzrd_probe(struct platform_device *pdev)
1145 {
1146 	struct device_node *np = pdev->dev.of_node;
1147 	struct clk_wzrd *clk_wzrd;
1148 	unsigned long rate;
1149 	int nr_outputs;
1150 	int ret;
1151 
1152 	ret = of_property_read_u32(np, "xlnx,nr-outputs", &nr_outputs);
1153 	if (ret || nr_outputs > WZRD_NUM_OUTPUTS)
1154 		return -EINVAL;
1155 
1156 	clk_wzrd = devm_kzalloc(&pdev->dev, struct_size(clk_wzrd, clk_data.hws, nr_outputs),
1157 				GFP_KERNEL);
1158 	if (!clk_wzrd)
1159 		return -ENOMEM;
1160 	platform_set_drvdata(pdev, clk_wzrd);
1161 
1162 	clk_wzrd->base = devm_platform_ioremap_resource(pdev, 0);
1163 	if (IS_ERR(clk_wzrd->base))
1164 		return PTR_ERR(clk_wzrd->base);
1165 
1166 	clk_wzrd->axi_clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
1167 	if (IS_ERR(clk_wzrd->axi_clk))
1168 		return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->axi_clk),
1169 				     "s_axi_aclk not found\n");
1170 	rate = clk_get_rate(clk_wzrd->axi_clk);
1171 	if (rate > WZRD_ACLK_MAX_FREQ) {
1172 		dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n", rate);
1173 		return -EINVAL;
1174 	}
1175 
1176 	if (!of_property_present(np, "xlnx,static-config")) {
1177 		ret = of_property_read_u32(np, "xlnx,speed-grade", &clk_wzrd->speed_grade);
1178 		if (!ret) {
1179 			if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) {
1180 				dev_warn(&pdev->dev, "invalid speed grade '%d'\n",
1181 					 clk_wzrd->speed_grade);
1182 				clk_wzrd->speed_grade = 0;
1183 			}
1184 		}
1185 
1186 		clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
1187 		if (IS_ERR(clk_wzrd->clk_in1))
1188 			return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->clk_in1),
1189 					     "clk_in1 not found\n");
1190 
1191 		ret = clk_wzrd_register_output_clocks(&pdev->dev, nr_outputs);
1192 		if (ret)
1193 			return ret;
1194 
1195 		clk_wzrd->clk_data.num = nr_outputs;
1196 		ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
1197 						  &clk_wzrd->clk_data);
1198 		if (ret) {
1199 			dev_err(&pdev->dev, "unable to register clock provider\n");
1200 			return ret;
1201 		}
1202 
1203 		if (clk_wzrd->speed_grade) {
1204 			clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier;
1205 
1206 			ret = devm_clk_notifier_register(&pdev->dev, clk_wzrd->clk_in1,
1207 							 &clk_wzrd->nb);
1208 			if (ret)
1209 				dev_warn(&pdev->dev,
1210 					 "unable to register clock notifier\n");
1211 
1212 			ret = devm_clk_notifier_register(&pdev->dev, clk_wzrd->axi_clk,
1213 							 &clk_wzrd->nb);
1214 			if (ret)
1215 				dev_warn(&pdev->dev,
1216 					 "unable to register clock notifier\n");
1217 		}
1218 	}
1219 
1220 	return 0;
1221 }
1222 
/*
 * OF match table.  Only the Versal entry carries match data; the other
 * compatibles fall back to the legacy (non-Versal) register layout.
 */
static const struct of_device_id clk_wzrd_ids[] = {
	{ .compatible = "xlnx,versal-clk-wizard", .data = &versal_data },
	{ .compatible = "xlnx,clocking-wizard"   },
	{ .compatible = "xlnx,clocking-wizard-v5.2"   },
	{ .compatible = "xlnx,clocking-wizard-v6.0"  },
	{ },
};
MODULE_DEVICE_TABLE(of, clk_wzrd_ids);
1231 
1232 static struct platform_driver clk_wzrd_driver = {
1233 	.driver = {
1234 		.name = "clk-wizard",
1235 		.of_match_table = clk_wzrd_ids,
1236 		.pm = &clk_wzrd_dev_pm_ops,
1237 	},
1238 	.probe = clk_wzrd_probe,
1239 };
1240 module_platform_driver(clk_wzrd_driver);
1241 
1242 MODULE_LICENSE("GPL");
1243 MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com");
1244 MODULE_DESCRIPTION("Driver for the Xilinx Clocking Wizard IP core");
1245