1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Xilinx 'Clocking Wizard' driver
4  *
5  *  Copyright (C) 2013 - 2021 Xilinx
6  *
7  *  Sören Brinkmann <soren.brinkmann@xilinx.com>
8  *
9  */
10 
11 #include <linux/bitfield.h>
12 #include <linux/platform_device.h>
13 #include <linux/clk.h>
14 #include <linux/clk-provider.h>
15 #include <linux/slab.h>
16 #include <linux/io.h>
17 #include <linux/of.h>
18 #include <linux/math64.h>
19 #include <linux/module.h>
20 #include <linux/err.h>
21 #include <linux/iopoll.h>
22 
23 #define WZRD_NUM_OUTPUTS	7
24 #define WZRD_ACLK_MAX_FREQ	250000000UL
25 
26 #define WZRD_CLK_CFG_REG(v, n)	(0x200 + 0x130 * (v) + 4 * (n))
27 
28 #define WZRD_CLKOUT0_FRAC_EN	BIT(18)
29 #define WZRD_CLKFBOUT_1		0
30 #define WZRD_CLKFBOUT_2		1
31 #define WZRD_CLKOUT0_1		2
32 #define WZRD_CLKOUT0_2		3
33 #define WZRD_DESKEW_2		20
34 #define WZRD_DIVCLK		21
35 #define WZRD_CLKFBOUT_4		51
36 #define WZRD_CLKFBOUT_3		48
37 #define WZRD_DUTY_CYCLE		2
38 #define WZRD_O_DIV		4
39 
40 #define WZRD_CLKFBOUT_FRAC_EN	BIT(1)
41 #define WZRD_CLKFBOUT_PREDIV2	(BIT(11) | BIT(12) | BIT(9))
42 #define WZRD_MULT_PREDIV2	(BIT(10) | BIT(9) | BIT(12))
43 #define WZRD_CLKFBOUT_EDGE	BIT(8)
44 #define WZRD_P5EN		BIT(13)
45 #define WZRD_P5EN_SHIFT		13
46 #define WZRD_P5FEDGE		BIT(15)
47 #define WZRD_DIVCLK_EDGE	BIT(10)
48 #define WZRD_P5FEDGE_SHIFT	15
49 #define WZRD_CLKOUT0_PREDIV2	BIT(11)
50 #define WZRD_EDGE_SHIFT		8
51 
52 #define WZRD_CLKFBOUT_MULT_SHIFT	8
53 #define WZRD_CLKFBOUT_MULT_MASK		(0xff << WZRD_CLKFBOUT_MULT_SHIFT)
54 #define WZRD_CLKFBOUT_L_SHIFT	0
55 #define WZRD_CLKFBOUT_H_SHIFT	8
56 #define WZRD_CLKFBOUT_L_MASK	GENMASK(7, 0)
57 #define WZRD_CLKFBOUT_H_MASK	GENMASK(15, 8)
58 #define WZRD_CLKFBOUT_FRAC_SHIFT	16
59 #define WZRD_CLKFBOUT_FRAC_MASK		(0x3ff << WZRD_CLKFBOUT_FRAC_SHIFT)
60 #define WZRD_VERSAL_FRAC_MASK		GENMASK(5, 0)
61 #define WZRD_DIVCLK_DIVIDE_SHIFT	0
62 #define WZRD_DIVCLK_DIVIDE_MASK		(0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
63 #define WZRD_CLKOUT_DIVIDE_SHIFT	0
64 #define WZRD_CLKOUT_DIVIDE_WIDTH	8
65 #define WZRD_CLKOUT_DIVIDE_MASK		(0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
66 #define WZRD_CLKOUT_FRAC_SHIFT		8
67 #define WZRD_CLKOUT_FRAC_MASK		0x3ff
68 #define WZRD_CLKOUT0_FRAC_MASK		GENMASK(17, 8)
69 
70 #define WZRD_DR_MAX_INT_DIV_VALUE	255
71 #define WZRD_DR_STATUS_REG_OFFSET	0x04
72 #define WZRD_DR_LOCK_BIT_MASK		0x00000001
73 #define WZRD_DR_INIT_REG_OFFSET		0x25C
74 #define WZRD_DR_INIT_VERSAL_OFFSET	0x14
75 #define WZRD_DR_DIV_TO_PHASE_OFFSET	4
76 #define WZRD_DR_BEGIN_DYNA_RECONF	0x03
77 #define WZRD_DR_BEGIN_DYNA_RECONF_5_2	0x07
78 #define WZRD_DR_BEGIN_DYNA_RECONF1_5_2	0x02
79 
80 #define WZRD_USEC_POLL		10
81 #define WZRD_TIMEOUT_POLL		1000
82 #define WZRD_FRAC_GRADIENT		64
83 #define PREDIV2_MULT			2
84 
85 /* Divider limits, from UG572 Table 3-4 for UltraScale+ */
86 #define DIV_O				0x01
87 #define DIV_ALL				0x03
88 
89 #define WZRD_M_MIN			2
90 #define WZRD_M_MAX			128
91 #define WZRD_D_MIN			1
92 #define WZRD_D_MAX			106
93 #define WZRD_VCO_MIN			800000000
94 #define WZRD_VCO_MAX			1600000000
95 #define WZRD_O_MIN			1
96 #define WZRD_O_MAX			128
97 #define VER_WZRD_M_MIN			4
98 #define VER_WZRD_M_MAX			432
99 #define VER_WZRD_D_MIN			1
100 #define VER_WZRD_D_MAX			123
101 #define VER_WZRD_VCO_MIN		2160000000ULL
102 #define VER_WZRD_VCO_MAX		4320000000ULL
103 #define VER_WZRD_O_MIN			2
104 #define VER_WZRD_O_MAX			511
105 #define WZRD_MIN_ERR			20000
106 #define WZRD_FRAC_POINTS		1000
107 
108 /* Get the mask from width */
109 #define div_mask(width)			((1 << (width)) - 1)
110 
111 /* Extract divider instance from clock hardware instance */
112 #define to_clk_wzrd_divider(_hw) container_of(_hw, struct clk_wzrd_divider, hw)
113 
114 enum clk_wzrd_int_clks {
115 	wzrd_clk_mul,
116 	wzrd_clk_mul_div,
117 	wzrd_clk_mul_frac,
118 	wzrd_clk_int_max
119 };
120 
121 /**
122  * struct clk_wzrd - Clock wizard private data structure
123  *
124  * @clk_data:		Clock data
125  * @nb:			Notifier block
126  * @base:		Memory base
127  * @clk_in1:		Handle to input clock 'clk_in1'
128  * @axi_clk:		Handle to input clock 's_axi_aclk'
129  * @clks_internal:	Internal clocks
130  * @clkout:		Output clocks
131  * @speed_grade:	Speed grade of the device
132  * @suspended:		Flag indicating power state of the device
133  */
134 struct clk_wzrd {
135 	struct clk_onecell_data clk_data;
136 	struct notifier_block nb;
137 	void __iomem *base;
138 	struct clk *clk_in1;
139 	struct clk *axi_clk;
140 	struct clk *clks_internal[wzrd_clk_int_max];
141 	struct clk *clkout[WZRD_NUM_OUTPUTS];
142 	unsigned int speed_grade;
143 	bool suspended;
144 };
145 
146 /**
147  * struct clk_wzrd_divider - clock divider specific to clk_wzrd
148  *
149  * @hw:		handle between common and hardware-specific interfaces
150  * @base:	base address of register containing the divider
151  * @offset:	offset address of register containing the divider
152  * @shift:	shift to the divider bit field
153  * @width:	width of the divider bit field
154  * @flags:	clk_wzrd divider flags
155  * @table:	array of value/divider pairs, last entry should have div = 0
156  * @m:	value of the multiplier
157  * @d:	value of the common divider
158  * @o:	value of the leaf divider
159  * @lock:	register lock
160  */
161 struct clk_wzrd_divider {
162 	struct clk_hw hw;
163 	void __iomem *base;
164 	u16 offset;
165 	u8 shift;
166 	u8 width;
167 	u8 flags;
168 	const struct clk_div_table *table;
169 	u32 m;
170 	u32 d;
171 	u32 o;
172 	spinlock_t *lock;  /* divider lock */
173 };
174 
175 struct versal_clk_data {
176 	bool is_versal;
177 };
178 
179 #define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)
180 
181 /* maximum frequencies for input/output clocks per speed grade */
182 static const unsigned long clk_wzrd_max_freq[] = {
183 	800000000UL,
184 	933000000UL,
185 	1066000000UL
186 };
187 
188 /* spin lock variable for clk_wzrd */
189 static DEFINE_SPINLOCK(clkwzrd_lock);
190 
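/*
 * Versal output-divider readback: the divider is stored as separate
 * high-time and low-time counts plus an edge bit (see the WZRD_CLKFBOUT_L/H
 * field macros), so the integer part is high + low + edge.  When the /2
 * pre-divider (PREDIV2) is enabled the effective divide doubles and P5EN
 * adds the odd count, i.e. div = 2 * (high + low + edge) + p5en.
 */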
191 static unsigned long clk_wzrd_recalc_rate_ver(struct clk_hw *hw,
192 					      unsigned long parent_rate)
193 {
194 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
195 	void __iomem *div_addr = divider->base + divider->offset;
196 	u32 div, p5en, edge, prediv2, all;
197 	unsigned int vall, valh;
198 
199 	edge = !!(readl(div_addr) & WZRD_CLKFBOUT_EDGE);
200 	p5en = !!(readl(div_addr) & WZRD_P5EN);
201 	prediv2 = !!(readl(div_addr) & WZRD_CLKOUT0_PREDIV2);
202 	vall = readl(div_addr + 4) & WZRD_CLKFBOUT_L_MASK;
203 	valh = readl(div_addr + 4) >> WZRD_CLKFBOUT_H_SHIFT;
204 	all = valh + vall + edge;
205 	if (!all)
206 		all = 1;
207 
208 	if (prediv2)
209 		div = 2 * all + prediv2 * p5en;
210 	else
211 		div = all;
212 
213 	return DIV_ROUND_UP_ULL((u64)parent_rate, div);
214 }
215 
216 static unsigned long clk_wzrd_recalc_rate(struct clk_hw *hw,
217 					  unsigned long parent_rate)
218 {
219 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
220 	void __iomem *div_addr = divider->base + divider->offset;
221 	unsigned int val;
222 
223 	val = readl(div_addr) >> divider->shift;
224 	val &= div_mask(divider->width);
225 
226 	return divider_recalc_rate(hw, parent_rate, val, divider->table,
227 			divider->flags, divider->width);
228 }
229 
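/*
 * Program a single Versal output divider through dynamic reconfiguration.
 * The requested integer divide 'value' is split to match the encoding read
 * back by clk_wzrd_recalc_rate_ver(): high = low = value / 4, the EDGE bit
 * covers a remainder of 2 and P5EN/P5FEDGE cover the odd count.  For
 * example, value = 7 gives high = low = 1, edge = 1, p5en = 1, so
 * 2 * (1 + 1 + 1) + 1 = 7.  The divider is written, then reconfiguration
 * is triggered and the lock bit is polled before and after.
 */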
230 static int clk_wzrd_ver_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
231 					 unsigned long parent_rate)
232 {
233 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
234 	void __iomem *div_addr = divider->base + divider->offset;
235 	u32 value, regh, edged, p5en, p5fedge, regval, regval1;
236 	unsigned long flags;
237 	int err;
238 
239 	spin_lock_irqsave(divider->lock, flags);
240 
241 	value = DIV_ROUND_CLOSEST(parent_rate, rate);
242 
243 	regh = (value / 4);
244 	regval1 = readl(div_addr);
245 	regval1 |= WZRD_CLKFBOUT_PREDIV2;
246 	regval1 = regval1 & ~(WZRD_CLKFBOUT_EDGE | WZRD_P5EN | WZRD_P5FEDGE);
247 	if (value % 4 > 1) {
248 		edged = 1;
249 		regval1 |= (edged << WZRD_EDGE_SHIFT);
250 	}
251 	p5fedge = value % 2;
252 	p5en = value % 2;
253 	regval1 = regval1 | p5en << WZRD_P5EN_SHIFT | p5fedge << WZRD_P5FEDGE_SHIFT;
254 	writel(regval1, div_addr);
255 
256 	regval = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
257 	writel(regval, div_addr + 4);
258 	/* Check status register */
259 	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
260 					value, value & WZRD_DR_LOCK_BIT_MASK,
261 					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
262 	if (err)
263 		goto err_reconfig;
264 
265 	/* Initiate reconfiguration */
266 	writel(WZRD_DR_BEGIN_DYNA_RECONF,
267 	       divider->base + WZRD_DR_INIT_VERSAL_OFFSET);
268 
269 	/* Check status register */
270 	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
271 					value, value & WZRD_DR_LOCK_BIT_MASK,
272 					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
273 err_reconfig:
274 	spin_unlock_irqrestore(divider->lock, flags);
275 	return err;
276 }
277 
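/*
 * Non-Versal dynamic reconfiguration for a single output divider: write the
 * integer divisor (capped at WZRD_DR_MAX_INT_DIV_VALUE) and clear the phase
 * offset, wait for the lock bit, then start reconfiguration with the
 * 5.2-style two-step write sequence and wait for lock again.
 */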
278 static int clk_wzrd_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
279 				     unsigned long parent_rate)
280 {
281 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
282 	void __iomem *div_addr = divider->base + divider->offset;
283 	unsigned long flags;
284 	u32 value;
285 	int err;
286 
287 	spin_lock_irqsave(divider->lock, flags);
288 
289 	value = DIV_ROUND_CLOSEST(parent_rate, rate);
290 
291 	/* Cap the value to max */
292 	value = min_t(u32, value, WZRD_DR_MAX_INT_DIV_VALUE);
293 
294 	/* Set divisor and clear phase offset */
295 	writel(value, div_addr);
296 	writel(0x00, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);
297 
298 	/* Check status register */
299 	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
300 					value, value & WZRD_DR_LOCK_BIT_MASK,
301 					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
302 	if (err)
303 		goto err_reconfig;
304 
305 	/* Initiate reconfiguration */
306 	writel(WZRD_DR_BEGIN_DYNA_RECONF_5_2,
307 	       divider->base + WZRD_DR_INIT_REG_OFFSET);
308 	writel(WZRD_DR_BEGIN_DYNA_RECONF1_5_2,
309 	       divider->base + WZRD_DR_INIT_REG_OFFSET);
310 
311 	/* Check status register */
312 	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
313 					value, value & WZRD_DR_LOCK_BIT_MASK,
314 					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
315 err_reconfig:
316 	spin_unlock_irqrestore(divider->lock, flags);
317 	return err;
318 }
319 
320 static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
321 				unsigned long *prate)
322 {
323 	u32 div;
324 
325 	/*
326 	 * Since we do not change the parent rate, just round the requested
327 	 * rate to the closest achievable value.
328 	 */
329 	div = DIV_ROUND_CLOSEST(*prate, rate);
330 
331 	return *prate / div;
332 }
333 
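/*
 * Exhaustively search M (multiplier), D (common divider) and O (output
 * divider) within the Versal device limits for a combination whose VCO
 * frequency is in range and whose output is within WZRD_MIN_ERR of the
 * requested rate.  The first match is stored in divider->m/d/o; -EBUSY is
 * returned if no combination fits.  clk_wzrd_get_divisors() below is the
 * same search with the non-Versal limits.
 */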
334 static int clk_wzrd_get_divisors_ver(struct clk_hw *hw, unsigned long rate,
335 				     unsigned long parent_rate)
336 {
337 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
338 	u64 vco_freq, freq, diff, vcomin, vcomax;
339 	u32 m, d, o;
340 	u32 mmin, mmax, dmin, dmax, omin, omax;
341 
342 	mmin = VER_WZRD_M_MIN;
343 	mmax = VER_WZRD_M_MAX;
344 	dmin = VER_WZRD_D_MIN;
345 	dmax = VER_WZRD_D_MAX;
346 	omin = VER_WZRD_O_MIN;
347 	omax = VER_WZRD_O_MAX;
348 	vcomin = VER_WZRD_VCO_MIN;
349 	vcomax = VER_WZRD_VCO_MAX;
350 
351 	for (m = mmin; m <= mmax; m++) {
352 		for (d = dmin; d <= dmax; d++) {
353 			vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
354 			if (vco_freq >= vcomin && vco_freq <= vcomax) {
355 				for (o = omin; o <= omax; o++) {
356 					freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
357 					diff = abs(freq - rate);
358 
359 					if (diff < WZRD_MIN_ERR) {
360 						divider->m = m;
361 						divider->d = d;
362 						divider->o = o;
363 						return 0;
364 					}
365 				}
366 			}
367 		}
368 	}
369 	return -EBUSY;
370 }
371 
372 static int clk_wzrd_get_divisors(struct clk_hw *hw, unsigned long rate,
373 				 unsigned long parent_rate)
374 {
375 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
376 	u64 vco_freq, freq, diff, vcomin, vcomax;
377 	u32 m, d, o;
378 	u32 mmin, mmax, dmin, dmax, omin, omax;
379 
380 	mmin = WZRD_M_MIN;
381 	mmax = WZRD_M_MAX;
382 	dmin = WZRD_D_MIN;
383 	dmax = WZRD_D_MAX;
384 	omin = WZRD_O_MIN;
385 	omax = WZRD_O_MAX;
386 	vcomin = WZRD_VCO_MIN;
387 	vcomax = WZRD_VCO_MAX;
388 
389 	for (m = mmin; m <= mmax; m++) {
390 		for (d = dmin; d <= dmax; d++) {
391 			vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
392 			if (vco_freq >= vcomin && vco_freq <= vcomax) {
393 				for (o = omin; o <= omax; o++) {
394 					freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
395 					diff = abs(freq - rate);
396 
397 					if (diff < WZRD_MIN_ERR) {
398 						divider->m = m;
399 						divider->d = d;
400 						divider->o = o;
401 						return 0;
402 					}
403 				}
404 			}
405 		}
406 	}
407 	return -EBUSY;
408 }
409 
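/*
 * Common reconfiguration helper: wait for the lock bit, write the "begin
 * dynamic reconfiguration" command to the given init register and wait
 * for the lock bit to be asserted again.
 */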
410 static int clk_wzrd_reconfig(struct clk_wzrd_divider *divider, void __iomem *div_addr)
411 {
412 	u32 value;
413 	int err;
414 
415 	/* Check status register */
416 	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
417 					value & WZRD_DR_LOCK_BIT_MASK,
418 					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
419 	if (err)
420 		return -ETIMEDOUT;
421 
422 	/* Initiate reconfiguration */
423 	writel(WZRD_DR_BEGIN_DYNA_RECONF, div_addr);
424 	/* Check status register */
425 	return readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
426 				 value & WZRD_DR_LOCK_BIT_MASK,
427 				 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
428 }
429 
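/*
 * Versal set_rate for the DIV_ALL divider: pick M/D/O, then program the
 * feedback multiplier and the common divider as high = low = value / 2
 * with an EDGE bit for odd values, and the CLKOUT0 divider using the
 * PREDIV2 encoding (value / 4 per half plus EDGE/P5EN), before triggering
 * reconfiguration.  Caller holds divider->lock.
 */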
430 static int clk_wzrd_dynamic_ver_all_nolock(struct clk_hw *hw, unsigned long rate,
431 					   unsigned long parent_rate)
432 {
433 	u32 regh, edged, p5en, p5fedge, value2, m, regval, regval1, value;
434 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
435 	void __iomem *div_addr;
436 	int err;
437 
438 	err = clk_wzrd_get_divisors_ver(hw, rate, parent_rate);
439 	if (err)
440 		return err;
441 
442 	writel(0, divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_4));
443 
444 	m = divider->m;
445 	edged = m % WZRD_DUTY_CYCLE;
446 	regh = m / WZRD_DUTY_CYCLE;
447 	regval1 = readl(divider->base + WZRD_CLK_CFG_REG(1,
448 							 WZRD_CLKFBOUT_1));
449 	regval1 |= WZRD_MULT_PREDIV2;
450 	if (edged)
451 		regval1 = regval1 | WZRD_CLKFBOUT_EDGE;
452 	else
453 		regval1 = regval1 & ~WZRD_CLKFBOUT_EDGE;
454 
455 	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
456 							 WZRD_CLKFBOUT_1));
457 	regval1 = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
458 	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
459 							 WZRD_CLKFBOUT_2));
460 
461 	value2 = divider->d;
462 	edged = value2 % WZRD_DUTY_CYCLE;
463 	regh = (value2 / WZRD_DUTY_CYCLE);
464 	regval1 = FIELD_PREP(WZRD_DIVCLK_EDGE, edged);
465 	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
466 							 WZRD_DESKEW_2));
467 	regval1 = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
468 	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1, WZRD_DIVCLK));
469 
470 	value = divider->o;
471 	regh = value / WZRD_O_DIV;
472 	regval1 = readl(divider->base + WZRD_CLK_CFG_REG(1,
473 							 WZRD_CLKOUT0_1));
474 	regval1 |= WZRD_CLKFBOUT_PREDIV2;
475 	regval1 = regval1 & ~(WZRD_CLKFBOUT_EDGE | WZRD_P5EN | WZRD_P5FEDGE);
476 
477 	if (value % WZRD_O_DIV > 1) {
478 		edged = 1;
479 		regval1 |= edged << WZRD_EDGE_SHIFT;
480 	}
481 
482 	p5fedge = value % WZRD_DUTY_CYCLE;
483 	p5en = value % WZRD_DUTY_CYCLE;
484 
485 	regval1 = regval1 | FIELD_PREP(WZRD_P5EN, p5en) | FIELD_PREP(WZRD_P5FEDGE, p5fedge);
486 	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
487 							 WZRD_CLKOUT0_1));
488 	regval = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
489 	writel(regval, divider->base + WZRD_CLK_CFG_REG(1,
490 							WZRD_CLKOUT0_2));
491 	div_addr = divider->base + WZRD_DR_INIT_VERSAL_OFFSET;
492 
493 	return clk_wzrd_reconfig(divider, div_addr);
494 }
495 
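/*
 * Non-Versal set_rate for the DIV_ALL divider (single-output configuration):
 * pick M/D/O, derive the 10-bit fractional part for CLKOUT0 from the VCO
 * frequency, program the multiplier/divider configuration registers and
 * trigger reconfiguration.  Caller holds divider->lock.
 */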
496 static int clk_wzrd_dynamic_all_nolock(struct clk_hw *hw, unsigned long rate,
497 				       unsigned long parent_rate)
498 {
499 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
500 	unsigned long vco_freq, rate_div, clockout0_div;
501 	void __iomem *div_addr;
502 	u32 reg, pre, f;
503 	int err;
504 
505 	err = clk_wzrd_get_divisors(hw, rate, parent_rate);
506 	if (err)
507 		return err;
508 
509 	vco_freq = DIV_ROUND_CLOSEST(parent_rate * divider->m, divider->d);
510 	rate_div = DIV_ROUND_CLOSEST_ULL((vco_freq * WZRD_FRAC_POINTS), rate);
511 
512 	clockout0_div = div_u64(rate_div,  WZRD_FRAC_POINTS);
513 
514 	pre = DIV_ROUND_CLOSEST_ULL(vco_freq * WZRD_FRAC_POINTS, rate);
515 	f = (pre - (clockout0_div * WZRD_FRAC_POINTS));
516 	f &= WZRD_CLKOUT_FRAC_MASK;
517 
518 	reg = FIELD_PREP(WZRD_CLKOUT_DIVIDE_MASK, clockout0_div) |
519 	      FIELD_PREP(WZRD_CLKOUT0_FRAC_MASK, f);
520 
521 	writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 2));
522 	/* Set divisor and clear phase offset */
523 	reg = FIELD_PREP(WZRD_CLKFBOUT_MULT_MASK, divider->m) |
524 	      FIELD_PREP(WZRD_DIVCLK_DIVIDE_MASK, divider->d);
525 	writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 0));
526 	writel(divider->o, divider->base + WZRD_CLK_CFG_REG(0, 2));
527 	writel(0, divider->base + WZRD_CLK_CFG_REG(0, 3));
528 	div_addr = divider->base + WZRD_DR_INIT_REG_OFFSET;
529 	return clk_wzrd_reconfig(divider, div_addr);
530 }
531 
532 static int clk_wzrd_dynamic_all(struct clk_hw *hw, unsigned long rate,
533 				unsigned long parent_rate)
534 {
535 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
536 	unsigned long flags;
537 	int ret;
538 
539 	spin_lock_irqsave(divider->lock, flags);
540 
541 	ret = clk_wzrd_dynamic_all_nolock(hw, rate, parent_rate);
542 
543 	spin_unlock_irqrestore(divider->lock, flags);
544 
545 	return ret;
546 }
547 
548 static int clk_wzrd_dynamic_all_ver(struct clk_hw *hw, unsigned long rate,
549 				    unsigned long parent_rate)
550 {
551 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
552 	unsigned long flags;
553 	int ret;
554 
555 	spin_lock_irqsave(divider->lock, flags);
556 
557 	ret = clk_wzrd_dynamic_ver_all_nolock(hw, rate, parent_rate);
558 
559 	spin_unlock_irqrestore(divider->lock, flags);
560 
561 	return ret;
562 }
563 
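/*
 * Recalculate the DIV_ALL rate from the programmed M, D and the CLKOUT0
 * integer + fractional divide: rate = parent * M / (D * (O + f / 1000)).
 */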
564 static unsigned long clk_wzrd_recalc_rate_all(struct clk_hw *hw,
565 					      unsigned long parent_rate)
566 {
567 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
568 	u32 m, d, o, div, reg, f;
569 
570 	reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 0));
571 	d = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
572 	m = FIELD_GET(WZRD_CLKFBOUT_MULT_MASK, reg);
573 	reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 2));
574 	o = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
575 	f = FIELD_GET(WZRD_CLKOUT0_FRAC_MASK, reg);
576 
577 	div = DIV_ROUND_CLOSEST(d * (WZRD_FRAC_POINTS * o + f), WZRD_FRAC_POINTS);
578 	return divider_recalc_rate(hw, parent_rate * m, div, divider->table,
579 		divider->flags, divider->width);
580 }
581 
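/*
 * Versal DIV_ALL readback: reconstruct the multiplier (plus its optional
 * 1/64-step fractional part), the CLKOUT0 divider (with PREDIV2/P5EN) and
 * the common divider from the high/low-count registers, then combine them.
 */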
582 static unsigned long clk_wzrd_recalc_rate_all_ver(struct clk_hw *hw,
583 						  unsigned long parent_rate)
584 {
585 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
586 	u32 edged, div2, p5en, edge, prediv2, all, regl, regh, mult;
587 	u32 div, reg;
588 
589 	edge = !!(readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_1)) &
590 			WZRD_CLKFBOUT_EDGE);
591 
592 	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_2));
593 	regl = FIELD_GET(WZRD_CLKFBOUT_L_MASK, reg);
594 	regh = FIELD_GET(WZRD_CLKFBOUT_H_MASK, reg);
595 
596 	mult = regl + regh + edge;
597 	if (!mult)
598 		mult = 1;
599 
600 	regl = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_4)) &
601 		     WZRD_CLKFBOUT_FRAC_EN;
602 	if (regl) {
603 		regl = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_3))
604 				& WZRD_VERSAL_FRAC_MASK;
605 		mult = mult * WZRD_FRAC_GRADIENT + regl;
606 		parent_rate = DIV_ROUND_CLOSEST((parent_rate * mult), WZRD_FRAC_GRADIENT);
607 	} else {
608 		parent_rate = parent_rate * mult;
609 	}
610 
611 	/* O Calculation */
612 	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKOUT0_1));
613 	edged = FIELD_GET(WZRD_CLKFBOUT_EDGE, reg);
614 	p5en = FIELD_GET(WZRD_P5EN, reg);
615 	prediv2 = FIELD_GET(WZRD_CLKOUT0_PREDIV2, reg);
616 
617 	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKOUT0_2));
618 	/* Low time */
619 	regl = FIELD_GET(WZRD_CLKFBOUT_L_MASK, reg);
620 	/* High time */
621 	regh = FIELD_GET(WZRD_CLKFBOUT_H_MASK, reg);
622 	all = regh + regl + edged;
623 	if (!all)
624 		all = 1;
625 
626 	if (prediv2)
627 		div2 = PREDIV2_MULT * all + p5en;
628 	else
629 		div2 = all;
630 
631 	/* D calculation */
632 	edged = !!(readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_DESKEW_2)) &
633 		     WZRD_DIVCLK_EDGE);
634 	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_DIVCLK));
635 	/* Low time */
636 	regl = FIELD_GET(WZRD_CLKFBOUT_L_MASK, reg);
637 	/* High time */
638 	regh = FIELD_GET(WZRD_CLKFBOUT_H_MASK, reg);
639 	div = regl + regh + edged;
640 	if (!div)
641 		div = 1;
642 
643 	div = div * div2;
644 	return divider_recalc_rate(hw, parent_rate, div, divider->table,
645 			divider->flags, divider->width);
646 }
647 
648 static long clk_wzrd_round_rate_all(struct clk_hw *hw, unsigned long rate,
649 				    unsigned long *prate)
650 {
651 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
652 	unsigned long int_freq;
653 	u32 m, d, o, div, f;
654 	int err;
655 
656 	err = clk_wzrd_get_divisors(hw, rate, *prate);
657 	if (err)
658 		return err;
659 
660 	m = divider->m;
661 	d = divider->d;
662 	o = divider->o;
663 
664 	div = d * o;
665 	int_freq =  divider_recalc_rate(hw, *prate * m, div, divider->table,
666 					divider->flags, divider->width);
667 
668 	if (rate > int_freq) {
669 		f = DIV_ROUND_CLOSEST_ULL(rate * WZRD_FRAC_POINTS, int_freq);
670 		rate = DIV_ROUND_CLOSEST(int_freq * f, WZRD_FRAC_POINTS);
671 	}
672 	return rate;
673 }
674 
675 static const struct clk_ops clk_wzrd_ver_divider_ops = {
676 	.round_rate = clk_wzrd_round_rate,
677 	.set_rate = clk_wzrd_ver_dynamic_reconfig,
678 	.recalc_rate = clk_wzrd_recalc_rate_ver,
679 };
680 
681 static const struct clk_ops clk_wzrd_ver_div_all_ops = {
682 	.round_rate = clk_wzrd_round_rate_all,
683 	.set_rate = clk_wzrd_dynamic_all_ver,
684 	.recalc_rate = clk_wzrd_recalc_rate_all_ver,
685 };
686 
687 static const struct clk_ops clk_wzrd_clk_divider_ops = {
688 	.round_rate = clk_wzrd_round_rate,
689 	.set_rate = clk_wzrd_dynamic_reconfig,
690 	.recalc_rate = clk_wzrd_recalc_rate,
691 };
692 
693 static const struct clk_ops clk_wzrd_clk_div_all_ops = {
694 	.round_rate = clk_wzrd_round_rate_all,
695 	.set_rate = clk_wzrd_dynamic_all,
696 	.recalc_rate = clk_wzrd_recalc_rate_all,
697 };
698 
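/*
 * Fractional CLKOUT0 readback: rate = parent * 1000 / (div * 1000 + frac),
 * i.e. the fractional field is expressed in 1/1000ths of the divider.
 */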
699 static unsigned long clk_wzrd_recalc_ratef(struct clk_hw *hw,
700 					   unsigned long parent_rate)
701 {
702 	unsigned int val;
703 	u32 div, frac;
704 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
705 	void __iomem *div_addr = divider->base + divider->offset;
706 
707 	val = readl(div_addr);
708 	div = val & div_mask(divider->width);
709 	frac = (val >> WZRD_CLKOUT_FRAC_SHIFT) & WZRD_CLKOUT_FRAC_MASK;
710 
711 	return mult_frac(parent_rate, 1000, (div * 1000) + frac);
712 }
713 
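/*
 * Fractional CLKOUT0 set_rate: split parent_rate / rate into an integer
 * divider and a 0..999 fractional part, program both fields, then run the
 * same poll/trigger/poll sequence as the integer path.
 */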
714 static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
715 				       unsigned long parent_rate)
716 {
717 	int err;
718 	u32 value, pre;
719 	unsigned long rate_div, f, clockout0_div;
720 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
721 	void __iomem *div_addr = divider->base + divider->offset;
722 
723 	rate_div = DIV_ROUND_DOWN_ULL(parent_rate * 1000, rate);
724 	clockout0_div = rate_div / 1000;
725 
726 	pre = DIV_ROUND_CLOSEST((parent_rate * 1000), rate);
727 	f = (u32)(pre - (clockout0_div * 1000));
728 	f = f & WZRD_CLKOUT_FRAC_MASK;
729 	f = f << WZRD_CLKOUT_DIVIDE_WIDTH;
730 
731 	value = (f  | (clockout0_div & WZRD_CLKOUT_DIVIDE_MASK));
732 
733 	/* Set divisor and clear phase offset */
734 	writel(value, div_addr);
735 	writel(0x0, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);
736 
737 	/* Check status register */
738 	err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
739 				 value & WZRD_DR_LOCK_BIT_MASK,
740 				 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
741 	if (err)
742 		return err;
743 
744 	/* Initiate reconfiguration */
745 	writel(WZRD_DR_BEGIN_DYNA_RECONF_5_2,
746 	       divider->base + WZRD_DR_INIT_REG_OFFSET);
747 	writel(WZRD_DR_BEGIN_DYNA_RECONF1_5_2,
748 	       divider->base + WZRD_DR_INIT_REG_OFFSET);
749 
750 	/* Check status register */
751 	return readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
752 				value & WZRD_DR_LOCK_BIT_MASK,
753 				WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
754 }
755 
756 static long clk_wzrd_round_rate_f(struct clk_hw *hw, unsigned long rate,
757 				  unsigned long *prate)
758 {
759 	return rate;
760 }
761 
762 static const struct clk_ops clk_wzrd_clk_divider_ops_f = {
763 	.round_rate = clk_wzrd_round_rate_f,
764 	.set_rate = clk_wzrd_dynamic_reconfig_f,
765 	.recalc_rate = clk_wzrd_recalc_ratef,
766 };
767 
768 static struct clk *clk_wzrd_register_divf(struct device *dev,
769 					  const char *name,
770 					  const char *parent_name,
771 					  unsigned long flags,
772 					  void __iomem *base, u16 offset,
773 					  u8 shift, u8 width,
774 					  u8 clk_divider_flags,
775 					  u32 div_type,
776 					  spinlock_t *lock)
777 {
778 	struct clk_wzrd_divider *div;
779 	struct clk_hw *hw;
780 	struct clk_init_data init;
781 	int ret;
782 
783 	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
784 	if (!div)
785 		return ERR_PTR(-ENOMEM);
786 
787 	init.name = name;
788 
789 	init.ops = &clk_wzrd_clk_divider_ops_f;
790 
791 	init.flags = flags;
792 	init.parent_names = &parent_name;
793 	init.num_parents = 1;
794 
795 	div->base = base;
796 	div->offset = offset;
797 	div->shift = shift;
798 	div->width = width;
799 	div->flags = clk_divider_flags;
800 	div->lock = lock;
801 	div->hw.init = &init;
802 
803 	hw = &div->hw;
804 	ret =  devm_clk_hw_register(dev, hw);
805 	if (ret)
806 		return ERR_PTR(ret);
807 
808 	return hw->clk;
809 }
810 
811 static struct clk *clk_wzrd_ver_register_divider(struct device *dev,
812 						 const char *name,
813 						 const char *parent_name,
814 						 unsigned long flags,
815 						 void __iomem *base,
816 						 u16 offset,
817 						 u8 shift, u8 width,
818 						 u8 clk_divider_flags,
819 						 u32 div_type,
820 						 spinlock_t *lock)
821 {
822 	struct clk_wzrd_divider *div;
823 	struct clk_hw *hw;
824 	struct clk_init_data init;
825 	int ret;
826 
827 	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
828 	if (!div)
829 		return ERR_PTR(-ENOMEM);
830 
831 	init.name = name;
832 	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
833 		init.ops = &clk_divider_ro_ops;
834 	else if (div_type == DIV_O)
835 		init.ops = &clk_wzrd_ver_divider_ops;
836 	else
837 		init.ops = &clk_wzrd_ver_div_all_ops;
838 	init.flags = flags;
839 	init.parent_names =  &parent_name;
840 	init.num_parents =  1;
841 
842 	div->base = base;
843 	div->offset = offset;
844 	div->shift = shift;
845 	div->width = width;
846 	div->flags = clk_divider_flags;
847 	div->lock = lock;
848 	div->hw.init = &init;
849 
850 	hw = &div->hw;
851 	ret = devm_clk_hw_register(dev, hw);
852 	if (ret)
853 		return ERR_PTR(ret);
854 
855 	return hw->clk;
856 }
857 
858 static struct clk *clk_wzrd_register_divider(struct device *dev,
859 					     const char *name,
860 					     const char *parent_name,
861 					     unsigned long flags,
862 					     void __iomem *base, u16 offset,
863 					     u8 shift, u8 width,
864 					     u8 clk_divider_flags,
865 					     u32 div_type,
866 					     spinlock_t *lock)
867 {
868 	struct clk_wzrd_divider *div;
869 	struct clk_hw *hw;
870 	struct clk_init_data init;
871 	int ret;
872 
873 	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
874 	if (!div)
875 		return ERR_PTR(-ENOMEM);
876 
877 	init.name = name;
878 	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
879 		init.ops = &clk_divider_ro_ops;
880 	else if (div_type == DIV_O)
881 		init.ops = &clk_wzrd_clk_divider_ops;
882 	else
883 		init.ops = &clk_wzrd_clk_div_all_ops;
884 	init.flags = flags;
885 	init.parent_names =  &parent_name;
886 	init.num_parents =  1;
887 
888 	div->base = base;
889 	div->offset = offset;
890 	div->shift = shift;
891 	div->width = width;
892 	div->flags = clk_divider_flags;
893 	div->lock = lock;
894 	div->hw.init = &init;
895 
896 	hw = &div->hw;
897 	ret = devm_clk_hw_register(dev, hw);
898 	if (ret)
899 		return ERR_PTR(ret);
900 
901 	return hw->clk;
902 }
903 
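/*
 * Rate-change notifier: reject PRE_RATE_CHANGE requests that would push
 * clk_in1 above the speed-grade limit or s_axi_aclk above
 * WZRD_ACLK_MAX_FREQ.  The check is skipped while the device is suspended.
 */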
904 static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
905 				 void *data)
906 {
907 	unsigned long max;
908 	struct clk_notifier_data *ndata = data;
909 	struct clk_wzrd *clk_wzrd = to_clk_wzrd(nb);
910 
911 	if (clk_wzrd->suspended)
912 		return NOTIFY_OK;
913 
914 	if (ndata->clk == clk_wzrd->clk_in1)
915 		max = clk_wzrd_max_freq[clk_wzrd->speed_grade - 1];
916 	else if (ndata->clk == clk_wzrd->axi_clk)
917 		max = WZRD_ACLK_MAX_FREQ;
918 	else
919 		return NOTIFY_DONE;	/* should never happen */
920 
921 	switch (event) {
922 	case PRE_RATE_CHANGE:
923 		if (ndata->new_rate > max)
924 			return NOTIFY_BAD;
925 		return NOTIFY_OK;
926 	case POST_RATE_CHANGE:
927 	case ABORT_RATE_CHANGE:
928 	default:
929 		return NOTIFY_DONE;
930 	}
931 }
932 
933 static int __maybe_unused clk_wzrd_suspend(struct device *dev)
934 {
935 	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
936 
937 	clk_disable_unprepare(clk_wzrd->axi_clk);
938 	clk_wzrd->suspended = true;
939 
940 	return 0;
941 }
942 
943 static int __maybe_unused clk_wzrd_resume(struct device *dev)
944 {
945 	int ret;
946 	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
947 
948 	ret = clk_prepare_enable(clk_wzrd->axi_clk);
949 	if (ret) {
950 		dev_err(dev, "unable to enable s_axi_aclk\n");
951 		return ret;
952 	}
953 
954 	clk_wzrd->suspended = false;
955 
956 	return 0;
957 }
958 
959 static SIMPLE_DEV_PM_OPS(clk_wzrd_dev_pm_ops, clk_wzrd_suspend,
960 			 clk_wzrd_resume);
961 
962 static const struct versal_clk_data versal_data = {
963 	.is_versal	= true,
964 };
965 
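/*
 * Probe: map the register space, acquire clk_in1 and s_axi_aclk, then model
 * the wizard as a fixed-factor multiplier, a common divider and one divider
 * per output (or a single combined divider when xlnx,nr-outputs is 1),
 * using the Versal or the legacy register layout as appropriate.
 */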
966 static int clk_wzrd_probe(struct platform_device *pdev)
967 {
968 	const char *clkout_name, *clk_name, *clk_mul_name;
969 	u32 regl, regh, edge, regld, reghd, edged, div;
970 	struct device_node *np = pdev->dev.of_node;
971 	const struct versal_clk_data *data;
972 	struct clk_wzrd *clk_wzrd;
973 	unsigned long flags = 0;
974 	void __iomem *ctrl_reg;
975 	u32 reg, reg_f, mult;
976 	bool is_versal = false;
977 	unsigned long rate;
978 	int nr_outputs;
979 	int i, ret;
980 
981 	clk_wzrd = devm_kzalloc(&pdev->dev, sizeof(*clk_wzrd), GFP_KERNEL);
982 	if (!clk_wzrd)
983 		return -ENOMEM;
984 	platform_set_drvdata(pdev, clk_wzrd);
985 
986 	clk_wzrd->base = devm_platform_ioremap_resource(pdev, 0);
987 	if (IS_ERR(clk_wzrd->base))
988 		return PTR_ERR(clk_wzrd->base);
989 
990 	ret = of_property_read_u32(np, "xlnx,speed-grade", &clk_wzrd->speed_grade);
991 	if (!ret) {
992 		if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) {
993 			dev_warn(&pdev->dev, "invalid speed grade '%d'\n",
994 				 clk_wzrd->speed_grade);
995 			clk_wzrd->speed_grade = 0;
996 		}
997 	}
998 
999 	clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
1000 	if (IS_ERR(clk_wzrd->clk_in1))
1001 		return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->clk_in1),
1002 				     "clk_in1 not found\n");
1003 
1004 	clk_wzrd->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
1005 	if (IS_ERR(clk_wzrd->axi_clk))
1006 		return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->axi_clk),
1007 				     "s_axi_aclk not found\n");
1008 	ret = clk_prepare_enable(clk_wzrd->axi_clk);
1009 	if (ret) {
1010 		dev_err(&pdev->dev, "enabling s_axi_aclk failed\n");
1011 		return ret;
1012 	}
1013 	rate = clk_get_rate(clk_wzrd->axi_clk);
1014 	if (rate > WZRD_ACLK_MAX_FREQ) {
1015 		dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n",
1016 			rate);
1017 		ret = -EINVAL;
1018 		goto err_disable_clk;
1019 	}
1020 
1021 	data = device_get_match_data(&pdev->dev);
1022 	if (data)
1023 		is_versal = data->is_versal;
1024 
1025 	ret = of_property_read_u32(np, "xlnx,nr-outputs", &nr_outputs);
1026 	if (ret || nr_outputs > WZRD_NUM_OUTPUTS) {
1027 		ret = -EINVAL;
1028 		goto err_disable_clk;
1029 	}
1030 
1031 	clkout_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_out0", dev_name(&pdev->dev));
1032 	if (!clkout_name) {
1033 		ret = -ENOMEM;
1034 		goto err_disable_clk;
1035 	}
1036 
1037 	if (is_versal) {
1038 		if (nr_outputs == 1) {
1039 			clk_wzrd->clkout[0] = clk_wzrd_ver_register_divider
1040 				(&pdev->dev, clkout_name,
1041 				__clk_get_name(clk_wzrd->clk_in1), 0,
1042 				clk_wzrd->base, WZRD_CLK_CFG_REG(is_versal, 3),
1043 				WZRD_CLKOUT_DIVIDE_SHIFT,
1044 				WZRD_CLKOUT_DIVIDE_WIDTH,
1045 				CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
1046 				DIV_ALL, &clkwzrd_lock);
1047 
1048 			goto out;
1049 		}
1050 		/* register multiplier */
1051 		edge = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0)) &
1052 				BIT(8));
1053 		regl = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 1)) &
1054 			     WZRD_CLKFBOUT_L_MASK) >> WZRD_CLKFBOUT_L_SHIFT;
1055 		regh = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 1)) &
1056 			     WZRD_CLKFBOUT_H_MASK) >> WZRD_CLKFBOUT_H_SHIFT;
1057 		mult = regl + regh + edge;
1058 		if (!mult)
1059 			mult = 1;
1060 		mult = mult * WZRD_FRAC_GRADIENT;
1061 
1062 		regl = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 51)) &
1063 			     WZRD_CLKFBOUT_FRAC_EN;
1064 		if (regl) {
1065 			regl = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 48)) &
1066 				WZRD_VERSAL_FRAC_MASK;
1067 			mult = mult + regl;
1068 		}
1069 		div = WZRD_FRAC_GRADIENT;
1070 	} else {
1071 		if (nr_outputs == 1) {
1072 			clk_wzrd->clkout[0] = clk_wzrd_register_divider
1073 				(&pdev->dev, clkout_name,
1074 				__clk_get_name(clk_wzrd->clk_in1), 0,
1075 				clk_wzrd->base, WZRD_CLK_CFG_REG(is_versal, 3),
1076 				WZRD_CLKOUT_DIVIDE_SHIFT,
1077 				WZRD_CLKOUT_DIVIDE_WIDTH,
1078 				CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
1079 				DIV_ALL, &clkwzrd_lock);
1080 
1081 			goto out;
1082 		}
1083 		reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0));
1084 		reg_f = reg & WZRD_CLKFBOUT_FRAC_MASK;
1085 		reg_f =  reg_f >> WZRD_CLKFBOUT_FRAC_SHIFT;
1086 
1087 		reg = reg & WZRD_CLKFBOUT_MULT_MASK;
1088 		reg =  reg >> WZRD_CLKFBOUT_MULT_SHIFT;
1089 		mult = (reg * 1000) + reg_f;
1090 		div = 1000;
1091 	}
1092 	clk_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_mul", dev_name(&pdev->dev));
1093 	if (!clk_name) {
1094 		ret = -ENOMEM;
1095 		goto err_disable_clk;
1096 	}
1097 	clk_wzrd->clks_internal[wzrd_clk_mul] = clk_register_fixed_factor
1098 			(&pdev->dev, clk_name,
1099 			 __clk_get_name(clk_wzrd->clk_in1),
1100 			0, mult, div);
1101 	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
1102 		dev_err(&pdev->dev, "unable to register fixed-factor clock\n");
1103 		ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
1104 		goto err_disable_clk;
1105 	}
1106 
1107 	clk_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_mul_div", dev_name(&pdev->dev));
1108 	if (!clk_name) {
1109 		ret = -ENOMEM;
1110 		goto err_rm_int_clk;
1111 	}
1112 
1113 	if (is_versal) {
1114 		edged = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 20)) &
1115 			     BIT(10));
1116 		regld = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 21)) &
1117 			     WZRD_CLKFBOUT_L_MASK) >> WZRD_CLKFBOUT_L_SHIFT;
1118 		reghd = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 21)) &
1119 		     WZRD_CLKFBOUT_H_MASK) >> WZRD_CLKFBOUT_H_SHIFT;
1120 		div = (regld  + reghd + edged);
1121 		if (!div)
1122 			div = 1;
1123 
1124 		clk_mul_name = __clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]);
1125 		clk_wzrd->clks_internal[wzrd_clk_mul_div] =
1126 			clk_register_fixed_factor(&pdev->dev, clk_name,
1127 						  clk_mul_name, 0, 1, div);
1128 	} else {
1129 		ctrl_reg = clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0);
1130 		clk_wzrd->clks_internal[wzrd_clk_mul_div] = clk_register_divider
1131 			(&pdev->dev, clk_name,
1132 			 __clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
1133 			flags, ctrl_reg, 0, 8, CLK_DIVIDER_ONE_BASED |
1134 			CLK_DIVIDER_ALLOW_ZERO, &clkwzrd_lock);
1135 	}
1136 	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) {
1137 		dev_err(&pdev->dev, "unable to register divider clock\n");
1138 		ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
1139 		goto err_rm_int_clk;
1140 	}
1141 
1142 	/* register div per output */
1143 	for (i = nr_outputs - 1; i >= 0; i--) {
1144 		clkout_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
1145 					     "%s_out%d", dev_name(&pdev->dev), i);
1146 		if (!clkout_name) {
1147 			ret = -ENOMEM;
1148 			goto err_rm_int_clk;
1149 		}
1150 
1151 		if (is_versal) {
1152 			clk_wzrd->clkout[i] = clk_wzrd_ver_register_divider
1153 						(&pdev->dev,
1154 						 clkout_name, clk_name, 0,
1155 						 clk_wzrd->base,
1156 						 (WZRD_CLK_CFG_REG(is_versal, 3) + i * 8),
1157 						 WZRD_CLKOUT_DIVIDE_SHIFT,
1158 						 WZRD_CLKOUT_DIVIDE_WIDTH,
1159 						 CLK_DIVIDER_ONE_BASED |
1160 						 CLK_DIVIDER_ALLOW_ZERO,
1161 						 DIV_O, &clkwzrd_lock);
1162 		} else {
1163 			if (!i)
1164 				clk_wzrd->clkout[i] = clk_wzrd_register_divf
1165 					(&pdev->dev, clkout_name, clk_name, flags, clk_wzrd->base,
1166 					(WZRD_CLK_CFG_REG(is_versal, 2) + i * 12),
1167 					WZRD_CLKOUT_DIVIDE_SHIFT,
1168 					WZRD_CLKOUT_DIVIDE_WIDTH,
1169 					CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
1170 					DIV_O, &clkwzrd_lock);
1171 			else
1172 				clk_wzrd->clkout[i] = clk_wzrd_register_divider
1173 					(&pdev->dev, clkout_name, clk_name, 0, clk_wzrd->base,
1174 					(WZRD_CLK_CFG_REG(is_versal, 2) + i * 12),
1175 					WZRD_CLKOUT_DIVIDE_SHIFT,
1176 					WZRD_CLKOUT_DIVIDE_WIDTH,
1177 					CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
1178 					DIV_O, &clkwzrd_lock);
1179 		}
1180 		if (IS_ERR(clk_wzrd->clkout[i])) {
1181 			int j;
1182 
1183 			for (j = i + 1; j < nr_outputs; j++)
1184 				clk_unregister(clk_wzrd->clkout[j]);
1185 			dev_err(&pdev->dev,
1186 				"unable to register divider clock\n");
1187 			ret = PTR_ERR(clk_wzrd->clkout[i]);
1188 			goto err_rm_int_clks;
1189 		}
1190 	}
1191 
1192 out:
1193 	clk_wzrd->clk_data.clks = clk_wzrd->clkout;
1194 	clk_wzrd->clk_data.clk_num = ARRAY_SIZE(clk_wzrd->clkout);
1195 	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_wzrd->clk_data);
1196 
1197 	if (clk_wzrd->speed_grade) {
1198 		clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier;
1199 
1200 		ret = clk_notifier_register(clk_wzrd->clk_in1,
1201 					    &clk_wzrd->nb);
1202 		if (ret)
1203 			dev_warn(&pdev->dev,
1204 				 "unable to register clock notifier\n");
1205 
1206 		ret = clk_notifier_register(clk_wzrd->axi_clk, &clk_wzrd->nb);
1207 		if (ret)
1208 			dev_warn(&pdev->dev,
1209 				 "unable to register clock notifier\n");
1210 	}
1211 
1212 	return 0;
1213 
1214 err_rm_int_clks:
1215 	clk_unregister(clk_wzrd->clks_internal[1]);
1216 err_rm_int_clk:
1217 	clk_unregister(clk_wzrd->clks_internal[0]);
1218 err_disable_clk:
1219 	clk_disable_unprepare(clk_wzrd->axi_clk);
1220 
1221 	return ret;
1222 }
1223 
1224 static void clk_wzrd_remove(struct platform_device *pdev)
1225 {
1226 	int i;
1227 	struct clk_wzrd *clk_wzrd = platform_get_drvdata(pdev);
1228 
1229 	of_clk_del_provider(pdev->dev.of_node);
1230 
1231 	for (i = 0; i < WZRD_NUM_OUTPUTS; i++)
1232 		clk_unregister(clk_wzrd->clkout[i]);
1233 	for (i = 0; i < wzrd_clk_int_max; i++)
1234 		clk_unregister(clk_wzrd->clks_internal[i]);
1235 
1236 	if (clk_wzrd->speed_grade) {
1237 		clk_notifier_unregister(clk_wzrd->axi_clk, &clk_wzrd->nb);
1238 		clk_notifier_unregister(clk_wzrd->clk_in1, &clk_wzrd->nb);
1239 	}
1240 
1241 	clk_disable_unprepare(clk_wzrd->axi_clk);
1242 }
1243 
1244 static const struct of_device_id clk_wzrd_ids[] = {
1245 	{ .compatible = "xlnx,versal-clk-wizard", .data = &versal_data },
1246 	{ .compatible = "xlnx,clocking-wizard"   },
1247 	{ .compatible = "xlnx,clocking-wizard-v5.2"   },
1248 	{ .compatible = "xlnx,clocking-wizard-v6.0"  },
1249 	{ },
1250 };
1251 MODULE_DEVICE_TABLE(of, clk_wzrd_ids);
1252 
1253 static struct platform_driver clk_wzrd_driver = {
1254 	.driver = {
1255 		.name = "clk-wizard",
1256 		.of_match_table = clk_wzrd_ids,
1257 		.pm = &clk_wzrd_dev_pm_ops,
1258 	},
1259 	.probe = clk_wzrd_probe,
1260 	.remove = clk_wzrd_remove,
1261 };
1262 module_platform_driver(clk_wzrd_driver);
1263 
1264 MODULE_LICENSE("GPL");
1265 MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com>");
1266 MODULE_DESCRIPTION("Driver for the Xilinx Clocking Wizard IP core");
1267