xref: /linux/drivers/clk/xilinx/clk-xlnx-clock-wizard.c (revision 9f3a2ba62c7226a6604b8aaeb92b5ff906fa4e6b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Xilinx 'Clocking Wizard' driver
4  *
5  *  Copyright (C) 2013 - 2021 Xilinx
6  *
7  *  Sören Brinkmann <soren.brinkmann@xilinx.com>
8  *
9  */
10 
11 #include <linux/bitfield.h>
12 #include <linux/platform_device.h>
13 #include <linux/clk.h>
14 #include <linux/clk-provider.h>
15 #include <linux/slab.h>
16 #include <linux/io.h>
17 #include <linux/of.h>
18 #include <linux/math64.h>
19 #include <linux/module.h>
20 #include <linux/overflow.h>
21 #include <linux/err.h>
22 #include <linux/iopoll.h>
23 
24 #define WZRD_NUM_OUTPUTS	7
25 #define WZRD_ACLK_MAX_FREQ	250000000UL
26 
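/*
 * Dynamic reconfiguration register 'n' of the configuration space; 'v' is 1
 * on Versal, which uses base offset 0x330 instead of 0x200.
 */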
27 #define WZRD_CLK_CFG_REG(v, n)	(0x200 + 0x130 * (v) + 4 * (n))
28 
29 #define WZRD_CLKOUT0_FRAC_EN	BIT(18)
30 #define WZRD_CLKFBOUT_1		0
31 #define WZRD_CLKFBOUT_2		1
32 #define WZRD_CLKOUT0_1		2
33 #define WZRD_CLKOUT0_2		3
34 #define WZRD_DESKEW_2		20
35 #define WZRD_DIVCLK		21
36 #define WZRD_CLKFBOUT_4		51
37 #define WZRD_CLKFBOUT_3		48
38 #define WZRD_DUTY_CYCLE		2
39 #define WZRD_O_DIV		4
40 
41 #define WZRD_CLKFBOUT_FRAC_EN	BIT(1)
42 #define WZRD_CLKFBOUT_PREDIV2	(BIT(11) | BIT(12) | BIT(9))
43 #define WZRD_MULT_PREDIV2	(BIT(10) | BIT(9) | BIT(12))
44 #define WZRD_CLKFBOUT_EDGE	BIT(8)
45 #define WZRD_P5EN		BIT(13)
46 #define WZRD_P5EN_SHIFT		13
47 #define WZRD_P5FEDGE		BIT(15)
48 #define WZRD_DIVCLK_EDGE	BIT(10)
49 #define WZRD_P5FEDGE_SHIFT	15
50 #define WZRD_CLKOUT0_PREDIV2	BIT(11)
51 #define WZRD_EDGE_SHIFT		8
52 
53 #define WZRD_CLKFBOUT_MULT_SHIFT	8
54 #define WZRD_CLKFBOUT_MULT_MASK		(0xff << WZRD_CLKFBOUT_MULT_SHIFT)
55 #define WZRD_CLKFBOUT_L_SHIFT	0
56 #define WZRD_CLKFBOUT_H_SHIFT	8
57 #define WZRD_CLKFBOUT_L_MASK	GENMASK(7, 0)
58 #define WZRD_CLKFBOUT_H_MASK	GENMASK(15, 8)
59 #define WZRD_CLKFBOUT_FRAC_SHIFT	16
60 #define WZRD_CLKFBOUT_FRAC_MASK		(0x3ff << WZRD_CLKFBOUT_FRAC_SHIFT)
61 #define WZRD_VERSAL_FRAC_MASK		GENMASK(5, 0)
62 #define WZRD_DIVCLK_DIVIDE_SHIFT	0
63 #define WZRD_DIVCLK_DIVIDE_MASK		(0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
64 #define WZRD_CLKOUT_DIVIDE_SHIFT	0
65 #define WZRD_CLKOUT_DIVIDE_WIDTH	8
66 #define WZRD_CLKOUT_DIVIDE_MASK		(0xff << WZRD_CLKOUT_DIVIDE_SHIFT)
67 #define WZRD_CLKOUT_FRAC_SHIFT		8
68 #define WZRD_CLKOUT_FRAC_MASK		0x3ff
69 #define WZRD_CLKOUT0_FRAC_MASK		GENMASK(17, 8)
70 
71 #define WZRD_DR_MAX_INT_DIV_VALUE	255
72 #define WZRD_DR_STATUS_REG_OFFSET	0x04
73 #define WZRD_DR_LOCK_BIT_MASK		0x00000001
74 #define WZRD_DR_INIT_REG_OFFSET		0x25C
75 #define WZRD_DR_INIT_VERSAL_OFFSET	0x14
76 #define WZRD_DR_DIV_TO_PHASE_OFFSET	4
77 #define WZRD_DR_BEGIN_DYNA_RECONF	0x03
78 #define WZRD_DR_BEGIN_DYNA_RECONF_5_2	0x07
79 #define WZRD_DR_BEGIN_DYNA_RECONF1_5_2	0x02
80 
81 #define WZRD_USEC_POLL		10
82 #define WZRD_TIMEOUT_POLL		1000
83 #define WZRD_FRAC_GRADIENT		64
84 #define PREDIV2_MULT			2
85 
86 /* Divider limits, from UG572 Table 3-4 for Ultrascale+ */
87 #define DIV_O				0x01
88 #define DIV_ALL				0x03
89 
90 #define WZRD_M_MIN			2
91 #define WZRD_M_MAX			128
92 #define WZRD_D_MIN			1
93 #define WZRD_D_MAX			106
94 #define WZRD_VCO_MIN			800000000
95 #define WZRD_VCO_MAX			1600000000
96 #define WZRD_O_MIN			1
97 #define WZRD_O_MAX			128
98 #define VER_WZRD_M_MIN			4
99 #define VER_WZRD_M_MAX			432
100 #define VER_WZRD_D_MIN			1
101 #define VER_WZRD_D_MAX			123
102 #define VER_WZRD_VCO_MIN		2160000000ULL
103 #define VER_WZRD_VCO_MAX		4320000000ULL
104 #define VER_WZRD_O_MIN			2
105 #define VER_WZRD_O_MAX			511
106 #define WZRD_MIN_ERR			20000
107 #define WZRD_FRAC_POINTS		1000
108 
109 /* Get the mask from width */
110 #define div_mask(width)			((1 << (width)) - 1)
111 
112 /* Extract divider instance from clock hardware instance */
113 #define to_clk_wzrd_divider(_hw) container_of(_hw, struct clk_wzrd_divider, hw)
114 
115 enum clk_wzrd_int_clks {
116 	wzrd_clk_mul,
117 	wzrd_clk_mul_div,
118 	wzrd_clk_mul_frac,
119 	wzrd_clk_int_max
120 };
121 
122 /**
123  * struct clk_wzrd - Clock wizard private data structure
124  *
125  * @nb:			Notifier block
126  * @base:		Memory base
127  * @clk_in1:		Handle to input clock 'clk_in1'
128  * @axi_clk:		Handle to input clock 's_axi_aclk'
129  * @clks_internal:	Internal clocks
130  * @speed_grade:	Speed grade of the device
131  * @suspended:		Flag indicating power state of the device
132  * @clk_data:		Output clock data
133  */
134 struct clk_wzrd {
135 	struct notifier_block nb;
136 	void __iomem *base;
137 	struct clk *clk_in1;
138 	struct clk *axi_clk;
139 	struct clk_hw *clks_internal[wzrd_clk_int_max];
140 	unsigned int speed_grade;
141 	bool suspended;
142 	struct clk_hw_onecell_data clk_data;
143 };
144 
145 /**
146  * struct clk_wzrd_divider - clock divider specific to clk_wzrd
147  *
148  * @hw:		handle between common and hardware-specific interfaces
149  * @base:	base address of register containing the divider
150  * @offset:	offset address of register containing the divider
151  * @shift:	shift to the divider bit field
152  * @width:	width of the divider bit field
153  * @flags:	clk_wzrd divider flags
154  * @table:	array of value/divider pairs, last entry should have div = 0
155  * @m:	value of the multiplier
156  * @d:	value of the common divider
157  * @o:	value of the leaf divider
158  * @lock:	register lock
159  */
160 struct clk_wzrd_divider {
161 	struct clk_hw hw;
162 	void __iomem *base;
163 	u16 offset;
164 	u8 shift;
165 	u8 width;
166 	u8 flags;
167 	const struct clk_div_table *table;
168 	u32 m;
169 	u32 d;
170 	u32 o;
171 	spinlock_t *lock;  /* divider lock */
172 };
173 
174 struct versal_clk_data {
175 	bool is_versal;
176 };
177 
178 #define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)
179 
180 /* maximum frequencies for input/output clocks per speed grade */
181 static const unsigned long clk_wzrd_max_freq[] = {
182 	800000000UL,
183 	933000000UL,
184 	1066000000UL
185 };
186 
187 /* spin lock variable for clk_wzrd */
188 static DEFINE_SPINLOCK(clkwzrd_lock);
189 
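/*
 * On Versal, an output divider is programmed as separate high/low half-period
 * counts plus an edge bit, so the effective divide value is high + low + edge.
 * When the PREDIV2 stage is enabled the value is doubled, and P5EN adds the
 * extra half-period step on top of that.
 */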
190 static unsigned long clk_wzrd_recalc_rate_ver(struct clk_hw *hw,
191 					      unsigned long parent_rate)
192 {
193 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
194 	void __iomem *div_addr = divider->base + divider->offset;
195 	u32 div, p5en, edge, prediv2, all;
196 	unsigned int vall, valh;
197 
198 	edge = !!(readl(div_addr) & WZRD_CLKFBOUT_EDGE);
199 	p5en = !!(readl(div_addr) & WZRD_P5EN);
200 	prediv2 = !!(readl(div_addr) & WZRD_CLKOUT0_PREDIV2);
201 	vall = readl(div_addr + 4) & WZRD_CLKFBOUT_L_MASK;
202 	valh = readl(div_addr + 4) >> WZRD_CLKFBOUT_H_SHIFT;
203 	all = valh + vall + edge;
204 	if (!all)
205 		all = 1;
206 
207 	if (prediv2)
208 		div = 2 * all + prediv2 * p5en;
209 	else
210 		div = all;
211 
212 	return DIV_ROUND_UP_ULL((u64)parent_rate, div);
213 }
214 
215 static unsigned long clk_wzrd_recalc_rate(struct clk_hw *hw,
216 					  unsigned long parent_rate)
217 {
218 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
219 	void __iomem *div_addr = divider->base + divider->offset;
220 	unsigned int val;
221 
222 	val = readl(div_addr) >> divider->shift;
223 	val &= div_mask(divider->width);
224 
225 	return divider_recalc_rate(hw, parent_rate, val, divider->table,
226 			divider->flags, divider->width);
227 }
228 
229 static int clk_wzrd_ver_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
230 					 unsigned long parent_rate)
231 {
232 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
233 	void __iomem *div_addr = divider->base + divider->offset;
234 	u32 value, regh, edged, p5en, p5fedge, regval, regval1;
235 	unsigned long flags;
236 	int err;
237 
238 	spin_lock_irqsave(divider->lock, flags);
239 
240 	value = DIV_ROUND_CLOSEST(parent_rate, rate);
241 
242 	regh = (value / 4);
243 	regval1 = readl(div_addr);
244 	regval1 |= WZRD_CLKFBOUT_PREDIV2;
245 	regval1 = regval1 & ~(WZRD_CLKFBOUT_EDGE | WZRD_P5EN | WZRD_P5FEDGE);
246 	if (value % 4 > 1) {
247 		edged = 1;
248 		regval1 |= (edged << WZRD_EDGE_SHIFT);
249 	}
250 	p5fedge = value % 2;
251 	p5en = value % 2;
252 	regval1 = regval1 | p5en << WZRD_P5EN_SHIFT | p5fedge << WZRD_P5FEDGE_SHIFT;
253 	writel(regval1, div_addr);
254 
255 	regval = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
256 	writel(regval, div_addr + 4);
257 	/* Check status register */
258 	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
259 					value, value & WZRD_DR_LOCK_BIT_MASK,
260 					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
261 	if (err)
262 		goto err_reconfig;
263 
264 	/* Initiate reconfiguration */
265 	writel(WZRD_DR_BEGIN_DYNA_RECONF,
266 	       divider->base + WZRD_DR_INIT_VERSAL_OFFSET);
267 
268 	/* Check status register */
269 	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
270 					value, value & WZRD_DR_LOCK_BIT_MASK,
271 					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
272 err_reconfig:
273 	spin_unlock_irqrestore(divider->lock, flags);
274 	return err;
275 }
276 
277 static int clk_wzrd_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
278 				     unsigned long parent_rate)
279 {
280 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
281 	void __iomem *div_addr = divider->base + divider->offset;
282 	unsigned long flags;
283 	u32 value;
284 	int err;
285 
286 	spin_lock_irqsave(divider->lock, flags);
287 
288 	value = DIV_ROUND_CLOSEST(parent_rate, rate);
289 
290 	/* Cap the value to max */
291 	value = min_t(u32, value, WZRD_DR_MAX_INT_DIV_VALUE);
292 
293 	/* Set divisor and clear phase offset */
294 	writel(value, div_addr);
295 	writel(0x00, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);
296 
297 	/* Check status register */
298 	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
299 					value, value & WZRD_DR_LOCK_BIT_MASK,
300 					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
301 	if (err)
302 		goto err_reconfig;
303 
304 	/* Initiate reconfiguration */
305 	writel(WZRD_DR_BEGIN_DYNA_RECONF_5_2,
306 	       divider->base + WZRD_DR_INIT_REG_OFFSET);
307 	writel(WZRD_DR_BEGIN_DYNA_RECONF1_5_2,
308 	       divider->base + WZRD_DR_INIT_REG_OFFSET);
309 
310 	/* Check status register */
311 	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET,
312 					value, value & WZRD_DR_LOCK_BIT_MASK,
313 					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
314 err_reconfig:
315 	spin_unlock_irqrestore(divider->lock, flags);
316 	return err;
317 }
318 
319 static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
320 				unsigned long *prate)
321 {
322 	u8 div;
323 
324 	/*
325 	 * Since we don't change the parent rate, just round the requested rate
326 	 * to the closest achievable value.
327 	 */
328 	div = DIV_ROUND_CLOSEST(*prate, rate);
329 
330 	return *prate / div;
331 }
332 
333 static int clk_wzrd_get_divisors_ver(struct clk_hw *hw, unsigned long rate,
334 				     unsigned long parent_rate)
335 {
336 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
337 	u64 vco_freq, freq, diff, vcomin, vcomax;
338 	u32 m, d, o;
339 	u32 mmin, mmax, dmin, dmax, omin, omax;
340 
341 	mmin = VER_WZRD_M_MIN;
342 	mmax = VER_WZRD_M_MAX;
343 	dmin = VER_WZRD_D_MIN;
344 	dmax = VER_WZRD_D_MAX;
345 	omin = VER_WZRD_O_MIN;
346 	omax = VER_WZRD_O_MAX;
347 	vcomin = VER_WZRD_VCO_MIN;
348 	vcomax = VER_WZRD_VCO_MAX;
349 
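	/*
	 * Exhaustive search of the divider space: the VCO frequency
	 * parent_rate * m / d must stay within the Versal limits, and the
	 * first (m, d, o) whose output rate lands within WZRD_MIN_ERR Hz of
	 * the request is kept.
	 */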
350 	for (m = mmin; m <= mmax; m++) {
351 		for (d = dmin; d <= dmax; d++) {
352 			vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
353 			if (vco_freq >= vcomin && vco_freq <= vcomax) {
354 				for (o = omin; o <= omax; o++) {
355 					freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
356 					diff = abs(freq - rate);
357 
358 					if (diff < WZRD_MIN_ERR) {
359 						divider->m = m;
360 						divider->d = d;
361 						divider->o = o;
362 						return 0;
363 					}
364 				}
365 			}
366 		}
367 	}
368 	return -EBUSY;
369 }
370 
371 static int clk_wzrd_get_divisors(struct clk_hw *hw, unsigned long rate,
372 				 unsigned long parent_rate)
373 {
374 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
375 	u64 vco_freq, freq, diff, vcomin, vcomax;
376 	u32 m, d, o;
377 	u32 mmin, mmax, dmin, dmax, omin, omax;
378 
379 	mmin = WZRD_M_MIN;
380 	mmax = WZRD_M_MAX;
381 	dmin = WZRD_D_MIN;
382 	dmax = WZRD_D_MAX;
383 	omin = WZRD_O_MIN;
384 	omax = WZRD_O_MAX;
385 	vcomin = WZRD_VCO_MIN;
386 	vcomax = WZRD_VCO_MAX;
387 
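	/*
	 * Same exhaustive m/d/o search as clk_wzrd_get_divisors_ver(), using
	 * the UltraScale+ MMCM limits.
	 */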
388 	for (m = mmin; m <= mmax; m++) {
389 		for (d = dmin; d <= dmax; d++) {
390 			vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
391 			if (vco_freq >= vcomin && vco_freq <= vcomax) {
392 				for (o = omin; o <= omax; o++) {
393 					freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
394 					diff = abs(freq - rate);
395 
396 					if (diff < WZRD_MIN_ERR) {
397 						divider->m = m;
398 						divider->d = d;
399 						divider->o = o;
400 						return 0;
401 					}
402 				}
403 			}
404 		}
405 	}
406 	return -EBUSY;
407 }
408 
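/*
 * Common reconfiguration handshake: wait for the clock to lock, kick off a
 * dynamic reconfiguration through the given init register, then wait for the
 * lock bit to be asserted again.
 */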
409 static int clk_wzrd_reconfig(struct clk_wzrd_divider *divider, void __iomem *div_addr)
410 {
411 	u32 value;
412 	int err;
413 
414 	/* Check status register */
415 	err = readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
416 					value & WZRD_DR_LOCK_BIT_MASK,
417 					WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
418 	if (err)
419 		return -ETIMEDOUT;
420 
421 	/* Initiate reconfiguration */
422 	writel(WZRD_DR_BEGIN_DYNA_RECONF, div_addr);
423 	/* Check status register */
424 	return readl_poll_timeout_atomic(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
425 				 value & WZRD_DR_LOCK_BIT_MASK,
426 				 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
427 }
428 
429 static int clk_wzrd_dynamic_ver_all_nolock(struct clk_hw *hw, unsigned long rate,
430 					   unsigned long parent_rate)
431 {
432 	u32 regh, edged, p5en, p5fedge, value2, m, regval, regval1, value;
433 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
434 	void __iomem *div_addr;
435 	int err;
436 
437 	err = clk_wzrd_get_divisors_ver(hw, rate, parent_rate);
438 	if (err)
439 		return err;
440 
441 	writel(0, divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_4));
442 
443 	m = divider->m;
444 	edged = m % WZRD_DUTY_CYCLE;
445 	regh = m / WZRD_DUTY_CYCLE;
446 	regval1 = readl(divider->base + WZRD_CLK_CFG_REG(1,
447 							 WZRD_CLKFBOUT_1));
448 	regval1 |= WZRD_MULT_PREDIV2;
449 	if (edged)
450 		regval1 = regval1 | WZRD_CLKFBOUT_EDGE;
451 	else
452 		regval1 = regval1 & ~WZRD_CLKFBOUT_EDGE;
453 
454 	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
455 							 WZRD_CLKFBOUT_1));
456 	regval1 = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
457 	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
458 							 WZRD_CLKFBOUT_2));
459 
460 	value2 = divider->d;
461 	edged = value2 % WZRD_DUTY_CYCLE;
462 	regh = (value2 / WZRD_DUTY_CYCLE);
463 	regval1 = FIELD_PREP(WZRD_DIVCLK_EDGE, edged);
464 	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
465 							 WZRD_DESKEW_2));
466 	regval1 = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
467 	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1, WZRD_DIVCLK));
468 
469 	value = divider->o;
470 	regh = value / WZRD_O_DIV;
471 	regval1 = readl(divider->base + WZRD_CLK_CFG_REG(1,
472 							 WZRD_CLKOUT0_1));
473 	regval1 |= WZRD_CLKFBOUT_PREDIV2;
474 	regval1 = regval1 & ~(WZRD_CLKFBOUT_EDGE | WZRD_P5EN | WZRD_P5FEDGE);
475 
476 	if (value % WZRD_O_DIV > 1) {
477 		edged = 1;
478 		regval1 |= edged << WZRD_EDGE_SHIFT;
479 	}
480 
481 	p5fedge = value % WZRD_DUTY_CYCLE;
482 	p5en = value % WZRD_DUTY_CYCLE;
483 
484 	regval1 = regval1 | FIELD_PREP(WZRD_P5EN, p5en) | FIELD_PREP(WZRD_P5FEDGE, p5fedge);
485 	writel(regval1, divider->base + WZRD_CLK_CFG_REG(1,
486 							 WZRD_CLKOUT0_1));
487 	regval = regh | regh << WZRD_CLKFBOUT_H_SHIFT;
488 	writel(regval, divider->base + WZRD_CLK_CFG_REG(1,
489 							WZRD_CLKOUT0_2));
490 	div_addr = divider->base + WZRD_DR_INIT_VERSAL_OFFSET;
491 
492 	return clk_wzrd_reconfig(divider, div_addr);
493 }
494 
495 static int clk_wzrd_dynamic_all_nolock(struct clk_hw *hw, unsigned long rate,
496 				       unsigned long parent_rate)
497 {
498 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
499 	unsigned long vco_freq, rate_div, clockout0_div;
500 	void __iomem *div_addr;
501 	u32 reg, pre, f;
502 	int err;
503 
504 	err = clk_wzrd_get_divisors(hw, rate, parent_rate);
505 	if (err)
506 		return err;
507 
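	/*
	 * Split the VCO-to-output divide ratio into an integer divider and a
	 * fractional remainder expressed in thousandths (WZRD_FRAC_POINTS).
	 */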
508 	vco_freq = DIV_ROUND_CLOSEST(parent_rate * divider->m, divider->d);
509 	rate_div = DIV_ROUND_CLOSEST_ULL((vco_freq * WZRD_FRAC_POINTS), rate);
510 
511 	clockout0_div = div_u64(rate_div,  WZRD_FRAC_POINTS);
512 
513 	pre = DIV_ROUND_CLOSEST_ULL(vco_freq * WZRD_FRAC_POINTS, rate);
514 	f = (pre - (clockout0_div * WZRD_FRAC_POINTS));
515 	f &= WZRD_CLKOUT_FRAC_MASK;
516 
517 	reg = FIELD_PREP(WZRD_CLKOUT_DIVIDE_MASK, clockout0_div) |
518 	      FIELD_PREP(WZRD_CLKOUT0_FRAC_MASK, f);
519 
520 	writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 2));
521 	/* Set divisor and clear phase offset */
522 	reg = FIELD_PREP(WZRD_CLKFBOUT_MULT_MASK, divider->m) |
523 	      FIELD_PREP(WZRD_DIVCLK_DIVIDE_MASK, divider->d);
524 	writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 0));
525 	writel(divider->o, divider->base + WZRD_CLK_CFG_REG(0, 2));
526 	writel(0, divider->base + WZRD_CLK_CFG_REG(0, 3));
527 	div_addr = divider->base + WZRD_DR_INIT_REG_OFFSET;
528 	return clk_wzrd_reconfig(divider, div_addr);
529 }
530 
531 static int clk_wzrd_dynamic_all(struct clk_hw *hw, unsigned long rate,
532 				unsigned long parent_rate)
533 {
534 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
535 	unsigned long flags;
536 	int ret;
537 
538 	spin_lock_irqsave(divider->lock, flags);
539 
540 	ret = clk_wzrd_dynamic_all_nolock(hw, rate, parent_rate);
541 
542 	spin_unlock_irqrestore(divider->lock, flags);
543 
544 	return ret;
545 }
546 
547 static int clk_wzrd_dynamic_all_ver(struct clk_hw *hw, unsigned long rate,
548 				    unsigned long parent_rate)
549 {
550 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
551 	unsigned long flags;
552 	int ret;
553 
554 	spin_lock_irqsave(divider->lock, flags);
555 
556 	ret = clk_wzrd_dynamic_ver_all_nolock(hw, rate, parent_rate);
557 
558 	spin_unlock_irqrestore(divider->lock, flags);
559 
560 	return ret;
561 }
562 
563 static unsigned long clk_wzrd_recalc_rate_all(struct clk_hw *hw,
564 					      unsigned long parent_rate)
565 {
566 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
567 	u32 m, d, o, div, reg, f;
568 
569 	reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 0));
570 	d = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
571 	m = FIELD_GET(WZRD_CLKFBOUT_MULT_MASK, reg);
572 	reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 2));
573 	o = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
574 	f = FIELD_GET(WZRD_CLKOUT0_FRAC_MASK, reg);
575 
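	/* Effective rate is parent * m / (d * (o + f / WZRD_FRAC_POINTS)) */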
576 	div = DIV_ROUND_CLOSEST(d * (WZRD_FRAC_POINTS * o + f), WZRD_FRAC_POINTS);
577 	return divider_recalc_rate(hw, parent_rate * m, div, divider->table,
578 		divider->flags, divider->width);
579 }
580 
581 static unsigned long clk_wzrd_recalc_rate_all_ver(struct clk_hw *hw,
582 						  unsigned long parent_rate)
583 {
584 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
585 	u32 edged, div2, p5en, edge, prediv2, all, regl, regh, mult;
586 	u32 div, reg;
587 
588 	edge = !!(readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_1)) &
589 			WZRD_CLKFBOUT_EDGE);
590 
591 	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_2));
592 	regl = FIELD_GET(WZRD_CLKFBOUT_L_MASK, reg);
593 	regh = FIELD_GET(WZRD_CLKFBOUT_H_MASK, reg);
594 
595 	mult = regl + regh + edge;
596 	if (!mult)
597 		mult = 1;
598 
599 	regl = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_4)) &
600 		     WZRD_CLKFBOUT_FRAC_EN;
601 	if (regl) {
602 		regl = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKFBOUT_3))
603 				& WZRD_VERSAL_FRAC_MASK;
604 		mult = mult * WZRD_FRAC_GRADIENT + regl;
605 		parent_rate = DIV_ROUND_CLOSEST((parent_rate * mult), WZRD_FRAC_GRADIENT);
606 	} else {
607 		parent_rate = parent_rate * mult;
608 	}
609 
610 	/* O Calculation */
611 	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKOUT0_1));
612 	edged = FIELD_GET(WZRD_CLKFBOUT_EDGE, reg);
613 	p5en = FIELD_GET(WZRD_P5EN, reg);
614 	prediv2 = FIELD_GET(WZRD_CLKOUT0_PREDIV2, reg);
615 
616 	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_CLKOUT0_2));
617 	/* Low time */
618 	regl = FIELD_GET(WZRD_CLKFBOUT_L_MASK, reg);
619 	/* High time */
620 	regh = FIELD_GET(WZRD_CLKFBOUT_H_MASK, reg);
621 	all = regh + regl + edged;
622 	if (!all)
623 		all = 1;
624 
625 	if (prediv2)
626 		div2 = PREDIV2_MULT * all + p5en;
627 	else
628 		div2 = all;
629 
630 	/* D calculation */
631 	edged = !!(readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_DESKEW_2)) &
632 		     WZRD_DIVCLK_EDGE);
633 	reg = readl(divider->base + WZRD_CLK_CFG_REG(1, WZRD_DIVCLK));
634 	/* Low time */
635 	regl = FIELD_GET(WZRD_CLKFBOUT_L_MASK, reg);
636 	/* High time */
637 	regh = FIELD_GET(WZRD_CLKFBOUT_H_MASK, reg);
638 	div = regl + regh + edged;
639 	if (!div)
640 		div = 1;
641 
642 	div = div * div2;
643 	return divider_recalc_rate(hw, parent_rate, div, divider->table,
644 			divider->flags, divider->width);
645 }
646 
647 static long clk_wzrd_round_rate_all(struct clk_hw *hw, unsigned long rate,
648 				    unsigned long *prate)
649 {
650 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
651 	unsigned long int_freq;
652 	u32 m, d, o, div, f;
653 	int err;
654 
655 	err = clk_wzrd_get_divisors(hw, rate, *prate);
656 	if (err)
657 		return err;
658 
659 	m = divider->m;
660 	d = divider->d;
661 	o = divider->o;
662 
663 	div = d * o;
664 	int_freq =  divider_recalc_rate(hw, *prate * m, div, divider->table,
665 					divider->flags, divider->width);
666 
667 	if (rate > int_freq) {
668 		f = DIV_ROUND_CLOSEST_ULL(rate * WZRD_FRAC_POINTS, int_freq);
669 		rate = DIV_ROUND_CLOSEST(int_freq * f, WZRD_FRAC_POINTS);
670 	}
671 	return rate;
672 }
673 
674 static const struct clk_ops clk_wzrd_ver_divider_ops = {
675 	.round_rate = clk_wzrd_round_rate,
676 	.set_rate = clk_wzrd_ver_dynamic_reconfig,
677 	.recalc_rate = clk_wzrd_recalc_rate_ver,
678 };
679 
680 static const struct clk_ops clk_wzrd_ver_div_all_ops = {
681 	.round_rate = clk_wzrd_round_rate_all,
682 	.set_rate = clk_wzrd_dynamic_all_ver,
683 	.recalc_rate = clk_wzrd_recalc_rate_all_ver,
684 };
685 
686 static const struct clk_ops clk_wzrd_clk_divider_ops = {
687 	.round_rate = clk_wzrd_round_rate,
688 	.set_rate = clk_wzrd_dynamic_reconfig,
689 	.recalc_rate = clk_wzrd_recalc_rate,
690 };
691 
692 static const struct clk_ops clk_wzrd_clk_div_all_ops = {
693 	.round_rate = clk_wzrd_round_rate_all,
694 	.set_rate = clk_wzrd_dynamic_all,
695 	.recalc_rate = clk_wzrd_recalc_rate_all,
696 };
697 
698 static unsigned long clk_wzrd_recalc_ratef(struct clk_hw *hw,
699 					   unsigned long parent_rate)
700 {
701 	unsigned int val;
702 	u32 div, frac;
703 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
704 	void __iomem *div_addr = divider->base + divider->offset;
705 
706 	val = readl(div_addr);
707 	div = val & div_mask(divider->width);
708 	frac = (val >> WZRD_CLKOUT_FRAC_SHIFT) & WZRD_CLKOUT_FRAC_MASK;
709 
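	/* rate = parent / (div + frac / 1000) */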
710 	return mult_frac(parent_rate, 1000, (div * 1000) + frac);
711 }
712 
713 static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
714 				       unsigned long parent_rate)
715 {
716 	int err;
717 	u32 value, pre;
718 	unsigned long rate_div, f, clockout0_div;
719 	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
720 	void __iomem *div_addr = divider->base + divider->offset;
721 
722 	rate_div = DIV_ROUND_DOWN_ULL(parent_rate * 1000, rate);
723 	clockout0_div = rate_div / 1000;
724 
725 	pre = DIV_ROUND_CLOSEST((parent_rate * 1000), rate);
726 	f = (u32)(pre - (clockout0_div * 1000));
727 	f = f & WZRD_CLKOUT_FRAC_MASK;
728 	f = f << WZRD_CLKOUT_DIVIDE_WIDTH;
729 
730 	value = (f  | (clockout0_div & WZRD_CLKOUT_DIVIDE_MASK));
731 
732 	/* Set divisor and clear phase offset */
733 	writel(value, div_addr);
734 	writel(0x0, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);
735 
736 	/* Check status register */
737 	err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
738 				 value & WZRD_DR_LOCK_BIT_MASK,
739 				 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
740 	if (err)
741 		return err;
742 
743 	/* Initiate reconfiguration */
744 	writel(WZRD_DR_BEGIN_DYNA_RECONF_5_2,
745 	       divider->base + WZRD_DR_INIT_REG_OFFSET);
746 	writel(WZRD_DR_BEGIN_DYNA_RECONF1_5_2,
747 	       divider->base + WZRD_DR_INIT_REG_OFFSET);
748 
749 	/* Check status register */
750 	return readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
751 				value & WZRD_DR_LOCK_BIT_MASK,
752 				WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
753 }
754 
755 static long clk_wzrd_round_rate_f(struct clk_hw *hw, unsigned long rate,
756 				  unsigned long *prate)
757 {
758 	return rate;
759 }
760 
761 static const struct clk_ops clk_wzrd_clk_divider_ops_f = {
762 	.round_rate = clk_wzrd_round_rate_f,
763 	.set_rate = clk_wzrd_dynamic_reconfig_f,
764 	.recalc_rate = clk_wzrd_recalc_ratef,
765 };
766 
767 static struct clk_hw *clk_wzrd_register_divf(struct device *dev,
768 					  const char *name,
769 					  const char *parent_name,
770 					  unsigned long flags,
771 					  void __iomem *base, u16 offset,
772 					  u8 shift, u8 width,
773 					  u8 clk_divider_flags,
774 					  u32 div_type,
775 					  spinlock_t *lock)
776 {
777 	struct clk_wzrd_divider *div;
778 	struct clk_hw *hw;
779 	struct clk_init_data init;
780 	int ret;
781 
782 	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
783 	if (!div)
784 		return ERR_PTR(-ENOMEM);
785 
786 	init.name = name;
787 
788 	init.ops = &clk_wzrd_clk_divider_ops_f;
789 
790 	init.flags = flags;
791 	init.parent_names = &parent_name;
792 	init.num_parents = 1;
793 
794 	div->base = base;
795 	div->offset = offset;
796 	div->shift = shift;
797 	div->width = width;
798 	div->flags = clk_divider_flags;
799 	div->lock = lock;
800 	div->hw.init = &init;
801 
802 	hw = &div->hw;
803 	ret =  devm_clk_hw_register(dev, hw);
804 	if (ret)
805 		return ERR_PTR(ret);
806 
807 	return hw;
808 }
809 
810 static struct clk_hw *clk_wzrd_ver_register_divider(struct device *dev,
811 						 const char *name,
812 						 const char *parent_name,
813 						 unsigned long flags,
814 						 void __iomem *base,
815 						 u16 offset,
816 						 u8 shift, u8 width,
817 						 u8 clk_divider_flags,
818 						 u32 div_type,
819 						 spinlock_t *lock)
820 {
821 	struct clk_wzrd_divider *div;
822 	struct clk_hw *hw;
823 	struct clk_init_data init;
824 	int ret;
825 
826 	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
827 	if (!div)
828 		return ERR_PTR(-ENOMEM);
829 
830 	init.name = name;
831 	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
832 		init.ops = &clk_divider_ro_ops;
833 	else if (div_type == DIV_O)
834 		init.ops = &clk_wzrd_ver_divider_ops;
835 	else
836 		init.ops = &clk_wzrd_ver_div_all_ops;
837 	init.flags = flags;
838 	init.parent_names =  &parent_name;
839 	init.num_parents =  1;
840 
841 	div->base = base;
842 	div->offset = offset;
843 	div->shift = shift;
844 	div->width = width;
845 	div->flags = clk_divider_flags;
846 	div->lock = lock;
847 	div->hw.init = &init;
848 
849 	hw = &div->hw;
850 	ret = devm_clk_hw_register(dev, hw);
851 	if (ret)
852 		return ERR_PTR(ret);
853 
854 	return hw;
855 }
856 
857 static struct clk_hw *clk_wzrd_register_divider(struct device *dev,
858 					     const char *name,
859 					     const char *parent_name,
860 					     unsigned long flags,
861 					     void __iomem *base, u16 offset,
862 					     u8 shift, u8 width,
863 					     u8 clk_divider_flags,
864 					     u32 div_type,
865 					     spinlock_t *lock)
866 {
867 	struct clk_wzrd_divider *div;
868 	struct clk_hw *hw;
869 	struct clk_init_data init;
870 	int ret;
871 
872 	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
873 	if (!div)
874 		return ERR_PTR(-ENOMEM);
875 
876 	init.name = name;
877 	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
878 		init.ops = &clk_divider_ro_ops;
879 	else if (div_type == DIV_O)
880 		init.ops = &clk_wzrd_clk_divider_ops;
881 	else
882 		init.ops = &clk_wzrd_clk_div_all_ops;
883 	init.flags = flags;
884 	init.parent_names =  &parent_name;
885 	init.num_parents =  1;
886 
887 	div->base = base;
888 	div->offset = offset;
889 	div->shift = shift;
890 	div->width = width;
891 	div->flags = clk_divider_flags;
892 	div->lock = lock;
893 	div->hw.init = &init;
894 
895 	hw = &div->hw;
896 	ret = devm_clk_hw_register(dev, hw);
897 	if (ret)
898 		return ERR_PTR(ret);
899 
900 	return hw;
901 }
902 
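/*
 * Reject parent rate changes that would push clk_in1 above the speed-grade
 * limit or s_axi_aclk above the AXI interface maximum.
 */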
903 static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
904 				 void *data)
905 {
906 	unsigned long max;
907 	struct clk_notifier_data *ndata = data;
908 	struct clk_wzrd *clk_wzrd = to_clk_wzrd(nb);
909 
910 	if (clk_wzrd->suspended)
911 		return NOTIFY_OK;
912 
913 	if (ndata->clk == clk_wzrd->clk_in1)
914 		max = clk_wzrd_max_freq[clk_wzrd->speed_grade - 1];
915 	else if (ndata->clk == clk_wzrd->axi_clk)
916 		max = WZRD_ACLK_MAX_FREQ;
917 	else
918 		return NOTIFY_DONE;	/* should never happen */
919 
920 	switch (event) {
921 	case PRE_RATE_CHANGE:
922 		if (ndata->new_rate > max)
923 			return NOTIFY_BAD;
924 		return NOTIFY_OK;
925 	case POST_RATE_CHANGE:
926 	case ABORT_RATE_CHANGE:
927 	default:
928 		return NOTIFY_DONE;
929 	}
930 }
931 
932 static int __maybe_unused clk_wzrd_suspend(struct device *dev)
933 {
934 	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
935 
936 	clk_disable_unprepare(clk_wzrd->axi_clk);
937 	clk_wzrd->suspended = true;
938 
939 	return 0;
940 }
941 
942 static int __maybe_unused clk_wzrd_resume(struct device *dev)
943 {
944 	int ret;
945 	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
946 
947 	ret = clk_prepare_enable(clk_wzrd->axi_clk);
948 	if (ret) {
949 		dev_err(dev, "unable to enable s_axi_aclk\n");
950 		return ret;
951 	}
952 
953 	clk_wzrd->suspended = false;
954 
955 	return 0;
956 }
957 
958 static SIMPLE_DEV_PM_OPS(clk_wzrd_dev_pm_ops, clk_wzrd_suspend,
959 			 clk_wzrd_resume);
960 
961 static const struct versal_clk_data versal_data = {
962 	.is_versal	= true,
963 };
964 
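/*
 * Clock topology registered here: clk_in1 -> fixed-factor multiplier (M,
 * including the fractional part when enabled) -> common divider (D) -> one
 * divider per output (O). A single-output instance is registered as one
 * combined divider instead.
 */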
965 static int clk_wzrd_register_output_clocks(struct device *dev, int nr_outputs)
966 {
967 	const char *clkout_name, *clk_name, *clk_mul_name;
968 	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
969 	u32 regl, regh, edge, regld, reghd, edged, div;
970 	const struct versal_clk_data *data;
971 	unsigned long flags = 0;
972 	bool is_versal = false;
973 	void __iomem *ctrl_reg;
974 	u32 reg, reg_f, mult;
975 	int i;
976 
977 	data = device_get_match_data(dev);
978 	if (data)
979 		is_versal = data->is_versal;
980 
981 	clkout_name = devm_kasprintf(dev, GFP_KERNEL, "%s_out0", dev_name(dev));
982 	if (!clkout_name)
983 		return -ENOMEM;
984 
985 	if (is_versal) {
986 		if (nr_outputs == 1) {
987 			clk_wzrd->clk_data.hws[0] = clk_wzrd_ver_register_divider
988 				(dev, clkout_name,
989 				__clk_get_name(clk_wzrd->clk_in1), 0,
990 				clk_wzrd->base, WZRD_CLK_CFG_REG(is_versal, 3),
991 				WZRD_CLKOUT_DIVIDE_SHIFT,
992 				WZRD_CLKOUT_DIVIDE_WIDTH,
993 				CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
994 				DIV_ALL, &clkwzrd_lock);
995 
996 			return 0;
997 		}
998 		/* register multiplier */
999 		edge = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0)) &
1000 				BIT(8));
1001 		regl = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 1)) &
1002 			     WZRD_CLKFBOUT_L_MASK) >> WZRD_CLKFBOUT_L_SHIFT;
1003 		regh = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 1)) &
1004 			     WZRD_CLKFBOUT_H_MASK) >> WZRD_CLKFBOUT_H_SHIFT;
1005 		mult = regl + regh + edge;
1006 		if (!mult)
1007 			mult = 1;
1008 		mult = mult * WZRD_FRAC_GRADIENT;
1009 
1010 		regl = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 51)) &
1011 			     WZRD_CLKFBOUT_FRAC_EN;
1012 		if (regl) {
1013 			regl = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 48)) &
1014 				WZRD_VERSAL_FRAC_MASK;
1015 			mult = mult + regl;
1016 		}
1017 		div = 64;
1018 	} else {
1019 		if (nr_outputs == 1) {
1020 			clk_wzrd->clk_data.hws[0] = clk_wzrd_register_divider
1021 				(dev, clkout_name,
1022 				__clk_get_name(clk_wzrd->clk_in1), 0,
1023 				clk_wzrd->base, WZRD_CLK_CFG_REG(is_versal, 3),
1024 				WZRD_CLKOUT_DIVIDE_SHIFT,
1025 				WZRD_CLKOUT_DIVIDE_WIDTH,
1026 				CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
1027 				DIV_ALL, &clkwzrd_lock);
1028 
1029 			return 0;
1030 		}
1031 		reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0));
1032 		reg_f = reg & WZRD_CLKFBOUT_FRAC_MASK;
1033 		reg_f =  reg_f >> WZRD_CLKFBOUT_FRAC_SHIFT;
1034 
1035 		reg = reg & WZRD_CLKFBOUT_MULT_MASK;
1036 		reg =  reg >> WZRD_CLKFBOUT_MULT_SHIFT;
1037 		mult = (reg * 1000) + reg_f;
1038 		div = 1000;
1039 	}
1040 	clk_name = devm_kasprintf(dev, GFP_KERNEL, "%s_mul", dev_name(dev));
1041 	if (!clk_name)
1042 		return -ENOMEM;
1043 	clk_wzrd->clks_internal[wzrd_clk_mul] = devm_clk_hw_register_fixed_factor
1044 			(dev, clk_name,
1045 			 __clk_get_name(clk_wzrd->clk_in1),
1046 			0, mult, div);
1047 	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
1048 		dev_err(dev, "unable to register fixed-factor clock\n");
1049 		return PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
1050 	}
1051 
1052 	clk_name = devm_kasprintf(dev, GFP_KERNEL, "%s_mul_div", dev_name(dev));
1053 	if (!clk_name)
1054 		return -ENOMEM;
1055 
1056 	if (is_versal) {
1057 		edged = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 20)) &
1058 			     BIT(10));
1059 		regld = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 21)) &
1060 			     WZRD_CLKFBOUT_L_MASK) >> WZRD_CLKFBOUT_L_SHIFT;
1061 		reghd = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 21)) &
1062 		     WZRD_CLKFBOUT_H_MASK) >> WZRD_CLKFBOUT_H_SHIFT;
1063 		div = (regld  + reghd + edged);
1064 		if (!div)
1065 			div = 1;
1066 
1067 		clk_mul_name = clk_hw_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]);
1068 		clk_wzrd->clks_internal[wzrd_clk_mul_div] =
1069 			devm_clk_hw_register_fixed_factor(dev, clk_name, clk_mul_name, 0, 1, div);
1070 	} else {
1071 		ctrl_reg = clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0);
1072 		clk_wzrd->clks_internal[wzrd_clk_mul_div] = devm_clk_hw_register_divider
1073 			(dev, clk_name,
1074 			 clk_hw_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
1075 			flags, ctrl_reg, 0, 8, CLK_DIVIDER_ONE_BASED |
1076 			CLK_DIVIDER_ALLOW_ZERO, &clkwzrd_lock);
1077 	}
1078 	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) {
1079 		dev_err(dev, "unable to register divider clock\n");
1080 		return PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
1081 	}
1082 
1083 	/* register div per output */
1084 	for (i = nr_outputs - 1; i >= 0 ; i--) {
1085 		clkout_name = devm_kasprintf(dev, GFP_KERNEL, "%s_out%d", dev_name(dev), i);
1086 		if (!clkout_name)
1087 			return -ENOMEM;
1088 
1089 		if (is_versal) {
1090 			clk_wzrd->clk_data.hws[i] = clk_wzrd_ver_register_divider
1091 						(dev,
1092 						 clkout_name, clk_name, 0,
1093 						 clk_wzrd->base,
1094 						 (WZRD_CLK_CFG_REG(is_versal, 3) + i * 8),
1095 						 WZRD_CLKOUT_DIVIDE_SHIFT,
1096 						 WZRD_CLKOUT_DIVIDE_WIDTH,
1097 						 CLK_DIVIDER_ONE_BASED |
1098 						 CLK_DIVIDER_ALLOW_ZERO,
1099 						 DIV_O, &clkwzrd_lock);
1100 		} else {
1101 			if (!i)
1102 				clk_wzrd->clk_data.hws[i] = clk_wzrd_register_divf
1103 					(dev, clkout_name, clk_name, flags, clk_wzrd->base,
1104 					(WZRD_CLK_CFG_REG(is_versal, 2) + i * 12),
1105 					WZRD_CLKOUT_DIVIDE_SHIFT,
1106 					WZRD_CLKOUT_DIVIDE_WIDTH,
1107 					CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
1108 					DIV_O, &clkwzrd_lock);
1109 			else
1110 				clk_wzrd->clk_data.hws[i] = clk_wzrd_register_divider
1111 					(dev, clkout_name, clk_name, 0, clk_wzrd->base,
1112 					(WZRD_CLK_CFG_REG(is_versal, 2) + i * 12),
1113 					WZRD_CLKOUT_DIVIDE_SHIFT,
1114 					WZRD_CLKOUT_DIVIDE_WIDTH,
1115 					CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
1116 					DIV_O, &clkwzrd_lock);
1117 		}
1118 		if (IS_ERR(clk_wzrd->clk_data.hws[i])) {
1119 			dev_err(dev, "unable to register divider clock\n");
1120 			return PTR_ERR(clk_wzrd->clk_data.hws[i]);
1121 		}
1122 	}
1123 
1124 	return 0;
1125 }
1126 
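/*
 * Illustrative device-tree node for this probe; the node name, unit address
 * and property values are assumptions made for the example, only the
 * compatible string, property names and clock names come from the code below:
 *
 *	clk_wiz0: clock-controller@b0000000 {
 *		compatible = "xlnx,clocking-wizard";
 *		reg = <0xb0000000 0x10000>;
 *		#clock-cells = <1>;
 *		xlnx,nr-outputs = <2>;
 *		xlnx,speed-grade = <1>;
 *		clocks = <&clk_in1>, <&s_axi_aclk>;
 *		clock-names = "clk_in1", "s_axi_aclk";
 *	};
 */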
1127 static int clk_wzrd_probe(struct platform_device *pdev)
1128 {
1129 	struct device_node *np = pdev->dev.of_node;
1130 	struct clk_wzrd *clk_wzrd;
1131 	unsigned long rate;
1132 	int nr_outputs;
1133 	int ret;
1134 
1135 	ret = of_property_read_u32(np, "xlnx,nr-outputs", &nr_outputs);
1136 	if (ret || nr_outputs > WZRD_NUM_OUTPUTS)
1137 		return -EINVAL;
1138 
1139 	clk_wzrd = devm_kzalloc(&pdev->dev, struct_size(clk_wzrd, clk_data.hws, nr_outputs),
1140 				GFP_KERNEL);
1141 	if (!clk_wzrd)
1142 		return -ENOMEM;
1143 	platform_set_drvdata(pdev, clk_wzrd);
1144 
1145 	clk_wzrd->base = devm_platform_ioremap_resource(pdev, 0);
1146 	if (IS_ERR(clk_wzrd->base))
1147 		return PTR_ERR(clk_wzrd->base);
1148 
1149 	clk_wzrd->axi_clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
1150 	if (IS_ERR(clk_wzrd->axi_clk))
1151 		return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->axi_clk),
1152 				     "s_axi_aclk not found\n");
1153 	rate = clk_get_rate(clk_wzrd->axi_clk);
1154 	if (rate > WZRD_ACLK_MAX_FREQ) {
1155 		dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n", rate);
1156 		return -EINVAL;
1157 	}
1158 
1159 	if (!of_property_present(np, "xlnx,static-config")) {
1160 		ret = of_property_read_u32(np, "xlnx,speed-grade", &clk_wzrd->speed_grade);
1161 		if (!ret) {
1162 			if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) {
1163 				dev_warn(&pdev->dev, "invalid speed grade '%d'\n",
1164 					 clk_wzrd->speed_grade);
1165 				clk_wzrd->speed_grade = 0;
1166 			}
1167 		}
1168 
1169 		clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
1170 		if (IS_ERR(clk_wzrd->clk_in1))
1171 			return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->clk_in1),
1172 					     "clk_in1 not found\n");
1173 
1174 		ret = clk_wzrd_register_output_clocks(&pdev->dev, nr_outputs);
1175 		if (ret)
1176 			return ret;
1177 
1178 		clk_wzrd->clk_data.num = nr_outputs;
1179 		ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
1180 						  &clk_wzrd->clk_data);
1181 		if (ret) {
1182 			dev_err(&pdev->dev, "unable to register clock provider\n");
1183 			return ret;
1184 		}
1185 
1186 		if (clk_wzrd->speed_grade) {
1187 			clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier;
1188 
1189 			ret = devm_clk_notifier_register(&pdev->dev, clk_wzrd->clk_in1,
1190 							 &clk_wzrd->nb);
1191 			if (ret)
1192 				dev_warn(&pdev->dev,
1193 					 "unable to register clock notifier\n");
1194 
1195 			ret = devm_clk_notifier_register(&pdev->dev, clk_wzrd->axi_clk,
1196 							 &clk_wzrd->nb);
1197 			if (ret)
1198 				dev_warn(&pdev->dev,
1199 					 "unable to register clock notifier\n");
1200 		}
1201 	}
1202 
1203 	return 0;
1204 }
1205 
1206 static const struct of_device_id clk_wzrd_ids[] = {
1207 	{ .compatible = "xlnx,versal-clk-wizard", .data = &versal_data },
1208 	{ .compatible = "xlnx,clocking-wizard"   },
1209 	{ .compatible = "xlnx,clocking-wizard-v5.2"   },
1210 	{ .compatible = "xlnx,clocking-wizard-v6.0"  },
1211 	{ },
1212 };
1213 MODULE_DEVICE_TABLE(of, clk_wzrd_ids);
1214 
1215 static struct platform_driver clk_wzrd_driver = {
1216 	.driver = {
1217 		.name = "clk-wizard",
1218 		.of_match_table = clk_wzrd_ids,
1219 		.pm = &clk_wzrd_dev_pm_ops,
1220 	},
1221 	.probe = clk_wzrd_probe,
1222 };
1223 module_platform_driver(clk_wzrd_driver);
1224 
1225 MODULE_LICENSE("GPL");
1226 MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com>");
1227 MODULE_DESCRIPTION("Driver for the Xilinx Clocking Wizard IP core");
1228