xref: /linux/drivers/clk/microchip/clk-core.c (revision df136764e86e4d271133359e2ecd2b6717cc5040)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Purna Chandra Mandal,<purna.mandal@microchip.com>
4  * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
5  */
6 #include <linux/clk-provider.h>
7 #include <linux/delay.h>
8 #include <linux/device.h>
9 #include <linux/interrupt.h>
10 #include <linux/io.h>
11 #include <linux/iopoll.h>
12 #include <linux/platform_data/pic32.h>
13 #include <asm/traps.h>
14 
15 #include "clk-core.h"
16 
17 /* OSCCON Reg fields */
18 #define OSC_CUR_MASK		0x07
19 #define OSC_CUR_SHIFT		12
20 #define OSC_NEW_MASK		0x07
21 #define OSC_NEW_SHIFT		8
22 #define OSC_SWEN		BIT(0)
23 
24 /* SPLLCON Reg fields */
25 #define PLL_RANGE_MASK		0x07
26 #define PLL_RANGE_SHIFT		0
27 #define PLL_ICLK_MASK		0x01
28 #define PLL_ICLK_SHIFT		7
29 #define PLL_IDIV_MASK		0x07
30 #define PLL_IDIV_SHIFT		8
31 #define PLL_ODIV_MASK		0x07
32 #define PLL_ODIV_SHIFT		24
33 #define PLL_MULT_MASK		0x7F
34 #define PLL_MULT_SHIFT		16
35 #define PLL_MULT_MAX		128
36 #define PLL_ODIV_MIN		1
37 #define PLL_ODIV_MAX		5
38 
39 /* Peripheral Bus Clock Reg Fields */
40 #define PB_DIV_MASK		0x7f
41 #define PB_DIV_SHIFT		0
42 #define PB_DIV_READY		BIT(11)
43 #define PB_DIV_ENABLE		BIT(15)
44 #define PB_DIV_MAX		128
45 #define PB_DIV_MIN		0
46 
47 /* Reference Oscillator Control Reg fields */
48 #define REFO_SEL_MASK		0x0f
49 #define REFO_SEL_SHIFT		0
50 #define REFO_ACTIVE		BIT(8)
51 #define REFO_DIVSW_EN		BIT(9)
52 #define REFO_OE			BIT(12)
53 #define REFO_ON			BIT(15)
54 #define REFO_DIV_SHIFT		16
55 #define REFO_DIV_MASK		0x7fff
56 
57 /* Reference Oscillator Trim Register Fields */
58 #define REFO_TRIM_REG		0x10
59 #define REFO_TRIM_MASK		0x1ff
60 #define REFO_TRIM_SHIFT		23
61 #define REFO_TRIM_MAX		511
62 
63 /* Mux Slew Control Register fields */
64 #define SLEW_BUSY		BIT(0)
65 #define SLEW_DOWNEN		BIT(1)
66 #define SLEW_UPEN		BIT(2)
67 #define SLEW_DIV		0x07
68 #define SLEW_DIV_SHIFT		8
69 #define SLEW_SYSDIV		0x0f
70 #define SLEW_SYSDIV_SHIFT	20
71 
72 /* Clock Poll Timeout */
73 #define LOCK_TIMEOUT_US         USEC_PER_MSEC
74 
75 /* SoC specific clock needed during SPLL clock rate switch */
76 static struct clk_hw *pic32_sclk_hw;
77 
#ifdef CONFIG_MACH_PIC32
/*
 * Add instruction pipeline delay while CPU clock is in-transition.
 * (The guard must be CONFIG_MACH_PIC32 - the platform's real Kconfig
 * symbol; with the old misspelled CONFIG_MATCH_PIC32 the nops were
 * always compiled out.)
 */
#define cpu_nop5()			\
do {					\
	__asm__ __volatile__("nop");	\
	__asm__ __volatile__("nop");	\
	__asm__ __volatile__("nop");	\
	__asm__ __volatile__("nop");	\
	__asm__ __volatile__("nop");	\
} while (0)
#else
#define cpu_nop5()
#endif
91 
/* Peripheral bus clocks (divider + gate, one per PBxDIV register) */
struct pic32_periph_clk {
	struct clk_hw hw;		/* clk framework handle */
	void __iomem *ctrl_reg;		/* PBxDIV control register */
	struct pic32_clk_common *core;	/* shared iobase/reg_lock/device */
};

/* map clk framework handle back to our driver data */
#define clkhw_to_pbclk(_hw)	container_of(_hw, struct pic32_periph_clk, hw)
100 
101 static int pbclk_is_enabled(struct clk_hw *hw)
102 {
103 	struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
104 
105 	return readl(pb->ctrl_reg) & PB_DIV_ENABLE;
106 }
107 
108 static int pbclk_enable(struct clk_hw *hw)
109 {
110 	struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
111 
112 	writel(PB_DIV_ENABLE, PIC32_SET(pb->ctrl_reg));
113 	return 0;
114 }
115 
116 static void pbclk_disable(struct clk_hw *hw)
117 {
118 	struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
119 
120 	writel(PB_DIV_ENABLE, PIC32_CLR(pb->ctrl_reg));
121 }
122 
123 static unsigned long calc_best_divided_rate(unsigned long rate,
124 					    unsigned long parent_rate,
125 					    u32 divider_max,
126 					    u32 divider_min)
127 {
128 	unsigned long divided_rate, divided_rate_down, best_rate;
129 	unsigned long div, div_up;
130 
131 	/* eq. clk_rate = parent_rate / divider.
132 	 *
133 	 * Find best divider to produce closest of target divided rate.
134 	 */
135 	div = parent_rate / rate;
136 	div = clamp_val(div, divider_min, divider_max);
137 	div_up = clamp_val(div + 1, divider_min, divider_max);
138 
139 	divided_rate = parent_rate / div;
140 	divided_rate_down = parent_rate / div_up;
141 	if (abs(rate - divided_rate_down) < abs(rate - divided_rate))
142 		best_rate = divided_rate_down;
143 	else
144 		best_rate = divided_rate;
145 
146 	return best_rate;
147 }
148 
149 static inline u32 pbclk_read_pbdiv(struct pic32_periph_clk *pb)
150 {
151 	return ((readl(pb->ctrl_reg) >> PB_DIV_SHIFT) & PB_DIV_MASK) + 1;
152 }
153 
154 static unsigned long pbclk_recalc_rate(struct clk_hw *hw,
155 				       unsigned long parent_rate)
156 {
157 	struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
158 
159 	return parent_rate / pbclk_read_pbdiv(pb);
160 }
161 
162 static int pbclk_determine_rate(struct clk_hw *hw,
163 				struct clk_rate_request *req)
164 {
165 	req->rate = calc_best_divided_rate(req->rate, req->best_parent_rate,
166 					   PB_DIV_MAX, PB_DIV_MIN);
167 
168 	return 0;
169 }
170 
/*
 * Program a new peripheral-bus divider.
 *
 * The DIV field may only be changed while DIV_READY is set, and the
 * register is write-protected (system-key unlock required).  Returns 0
 * on success, an error from the ready polls, or -EBUSY if the divider
 * readback does not match what was requested.
 */
static int pbclk_set_rate(struct clk_hw *hw, unsigned long rate,
			  unsigned long parent_rate)
{
	struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
	unsigned long flags;
	u32 v, div;
	int err;

	/* check & wait for DIV_READY */
	err = readl_poll_timeout(pb->ctrl_reg, v, v & PB_DIV_READY,
				 1, LOCK_TIMEOUT_US);
	if (err)
		return err;

	/* calculate clkdiv and best rate */
	div = DIV_ROUND_CLOSEST(parent_rate, rate);

	spin_lock_irqsave(&pb->core->reg_lock, flags);

	/* apply new div; the hardware field encodes (divider - 1) */
	v = readl(pb->ctrl_reg);
	v &= ~PB_DIV_MASK;
	v |= (div - 1);

	/* unlock write-protected clock registers before the write */
	pic32_syskey_unlock();

	writel(v, pb->ctrl_reg);

	spin_unlock_irqrestore(&pb->core->reg_lock, flags);

	/* wait again for DIV_READY */
	err = readl_poll_timeout(pb->ctrl_reg, v, v & PB_DIV_READY,
				 1, LOCK_TIMEOUT_US);
	if (err)
		return err;

	/* confirm that new div is applied correctly */
	return (pbclk_read_pbdiv(pb) == div) ? 0 : -EBUSY;
}
210 
/* Peripheral-bus clock operations: gate + programmable divider. */
const struct clk_ops pic32_pbclk_ops = {
	.enable		= pbclk_enable,
	.disable	= pbclk_disable,
	.is_enabled	= pbclk_is_enabled,
	.recalc_rate	= pbclk_recalc_rate,
	.determine_rate = pbclk_determine_rate,
	.set_rate	= pbclk_set_rate,
};
219 
220 struct clk *pic32_periph_clk_register(const struct pic32_periph_clk_data *desc,
221 				      struct pic32_clk_common *core)
222 {
223 	struct pic32_periph_clk *pbclk;
224 	struct clk *clk;
225 
226 	pbclk = devm_kzalloc(core->dev, sizeof(*pbclk), GFP_KERNEL);
227 	if (!pbclk)
228 		return ERR_PTR(-ENOMEM);
229 
230 	pbclk->hw.init = &desc->init_data;
231 	pbclk->core = core;
232 	pbclk->ctrl_reg = desc->ctrl_reg + core->iobase;
233 
234 	clk = devm_clk_register(core->dev, &pbclk->hw);
235 	if (IS_ERR(clk)) {
236 		dev_err(core->dev, "%s: clk_register() failed\n", __func__);
237 		devm_kfree(core->dev, pbclk);
238 	}
239 
240 	return clk;
241 }
242 
/* Reference oscillator operations */
struct pic32_ref_osc {
	struct clk_hw hw;		/* clk framework handle */
	void __iomem *ctrl_reg;		/* REFOxCON register */
	const u32 *parent_map;		/* optional index -> ROSEL mapping */
	struct pic32_clk_common *core;	/* shared iobase/reg_lock/device */
};

/* map clk framework handle back to our driver data */
#define clkhw_to_refosc(_hw)	container_of(_hw, struct pic32_ref_osc, hw)
252 
253 static int roclk_is_enabled(struct clk_hw *hw)
254 {
255 	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
256 
257 	return readl(refo->ctrl_reg) & REFO_ON;
258 }
259 
260 static int roclk_enable(struct clk_hw *hw)
261 {
262 	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
263 
264 	writel(REFO_ON | REFO_OE, PIC32_SET(refo->ctrl_reg));
265 	return 0;
266 }
267 
268 static void roclk_disable(struct clk_hw *hw)
269 {
270 	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
271 
272 	writel(REFO_ON | REFO_OE, PIC32_CLR(refo->ctrl_reg));
273 }
274 
/* .init callback: start with the oscillator gated off. */
static int roclk_init(struct clk_hw *hw)
{
	/* initialize clock in disabled state */
	roclk_disable(hw);

	return 0;
}
282 
/*
 * Return the index of the currently selected parent, translating the
 * hardware ROSEL value through parent_map when one is provided.
 */
static u8 roclk_get_parent(struct clk_hw *hw)
{
	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
	u32 v, i;

	/* read selected source from the ROSEL field */
	v = (readl(refo->ctrl_reg) >> REFO_SEL_SHIFT) & REFO_SEL_MASK;

	/* no translation table: hardware value is the parent index */
	if (!refo->parent_map)
		return v;

	for (i = 0; i < clk_hw_get_num_parents(hw); i++)
		if (refo->parent_map[i] == v)
			return i;

	/*
	 * NOTE(review): return type is u8, so -EINVAL is truncated to 0xea;
	 * callers only see an out-of-range index here - confirm intended.
	 */
	return -EINVAL;
}
299 
300 static unsigned long roclk_calc_rate(unsigned long parent_rate,
301 				     u32 rodiv, u32 rotrim)
302 {
303 	u64 rate64;
304 
305 	/* fout = fin / [2 * {div + (trim / 512)}]
306 	 *	= fin * 512 / [1024 * div + 2 * trim]
307 	 *	= fin * 256 / (512 * div + trim)
308 	 *	= (fin << 8) / ((div << 9) + trim)
309 	 */
310 	if (rotrim) {
311 		rodiv = (rodiv << 9) + rotrim;
312 		rate64 = parent_rate;
313 		rate64 <<= 8;
314 		do_div(rate64, rodiv);
315 	} else if (rodiv) {
316 		rate64 = parent_rate / (rodiv << 1);
317 	} else {
318 		rate64 = parent_rate;
319 	}
320 	return rate64;
321 }
322 
323 static void roclk_calc_div_trim(unsigned long rate,
324 				unsigned long parent_rate,
325 				u32 *rodiv_p, u32 *rotrim_p)
326 {
327 	u32 div, rotrim, rodiv;
328 	u64 frac;
329 
330 	/* Find integer approximation of floating-point arithmetic.
331 	 *      fout = fin / [2 * {rodiv + (rotrim / 512)}] ... (1)
332 	 * i.e. fout = fin / 2 * DIV
333 	 *      whereas DIV = rodiv + (rotrim / 512)
334 	 *
335 	 * Since kernel does not perform floating-point arithmetic so
336 	 * (rotrim/512) will be zero. And DIV & rodiv will result same.
337 	 *
338 	 * ie. fout = (fin * 256) / [(512 * rodiv) + rotrim]  ... from (1)
339 	 * ie. rotrim = ((fin * 256) / fout) - (512 * DIV)
340 	 */
341 	if (parent_rate <= rate) {
342 		div = 0;
343 		frac = 0;
344 		rodiv = 0;
345 		rotrim = 0;
346 	} else {
347 		div = parent_rate / (rate << 1);
348 		frac = parent_rate;
349 		frac <<= 8;
350 		do_div(frac, rate);
351 		frac -= (u64)(div << 9);
352 
353 		rodiv = (div > REFO_DIV_MASK) ? REFO_DIV_MASK : div;
354 		rotrim = (frac >= REFO_TRIM_MAX) ? REFO_TRIM_MAX : frac;
355 	}
356 
357 	if (rodiv_p)
358 		*rodiv_p = rodiv;
359 
360 	if (rotrim_p)
361 		*rotrim_p = rotrim;
362 }
363 
364 static unsigned long roclk_recalc_rate(struct clk_hw *hw,
365 				       unsigned long parent_rate)
366 {
367 	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
368 	u32 v, rodiv, rotrim;
369 
370 	/* get rodiv */
371 	v = readl(refo->ctrl_reg);
372 	rodiv = (v >> REFO_DIV_SHIFT) & REFO_DIV_MASK;
373 
374 	/* get trim */
375 	v = readl(refo->ctrl_reg + REFO_TRIM_REG);
376 	rotrim = (v >> REFO_TRIM_SHIFT) & REFO_TRIM_MASK;
377 
378 	return roclk_calc_rate(parent_rate, rodiv, rotrim);
379 }
380 
381 static int roclk_determine_rate(struct clk_hw *hw,
382 				struct clk_rate_request *req)
383 {
384 	struct clk_hw *parent_clk, *best_parent_clk = NULL;
385 	unsigned int i, delta, best_delta = -1;
386 	unsigned long parent_rate, best_parent_rate = 0;
387 	unsigned long best = 0, nearest_rate;
388 
389 	/* find a parent which can generate nearest clkrate >= rate */
390 	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
391 		u32 rotrim, rodiv;
392 
393 		/* get parent */
394 		parent_clk = clk_hw_get_parent_by_index(hw, i);
395 		if (!parent_clk)
396 			continue;
397 
398 		/* skip if parent runs slower than target rate */
399 		parent_rate = clk_hw_get_rate(parent_clk);
400 		if (req->rate > parent_rate)
401 			continue;
402 
403 		/* calculate dividers for new rate */
404 		roclk_calc_div_trim(req->rate, req->best_parent_rate, &rodiv, &rotrim);
405 
406 		/* caclulate new rate (rounding) based on new rodiv & rotrim */
407 		nearest_rate = roclk_calc_rate(req->best_parent_rate, rodiv, rotrim);
408 
409 		delta = abs(nearest_rate - req->rate);
410 		if ((nearest_rate >= req->rate) && (delta < best_delta)) {
411 			best_parent_clk = parent_clk;
412 			best_parent_rate = parent_rate;
413 			best = nearest_rate;
414 			best_delta = delta;
415 
416 			if (delta == 0)
417 				break;
418 		}
419 	}
420 
421 	/* if no match found, retain old rate */
422 	if (!best_parent_clk) {
423 		pr_err("%s:%s, no parent found for rate %lu.\n",
424 		       __func__, clk_hw_get_name(hw), req->rate);
425 		return clk_hw_get_rate(hw);
426 	}
427 
428 	pr_debug("%s,rate %lu, best_parent(%s, %lu), best %lu, delta %d\n",
429 		 clk_hw_get_name(hw), req->rate,
430 		 clk_hw_get_name(best_parent_clk), best_parent_rate,
431 		 best, best_delta);
432 
433 	if (req->best_parent_rate)
434 		req->best_parent_rate = best_parent_rate;
435 
436 	if (req->best_parent_hw)
437 		req->best_parent_hw = best_parent_clk;
438 
439 	return best;
440 }
441 
/*
 * Select a new input source for the reference oscillator.
 *
 * ROSEL must not be changed while a previous switch is still active, so
 * wait for REFO_ACTIVE to clear first.  The write itself is done under
 * the core register lock after a system-key unlock.
 */
static int roclk_set_parent(struct clk_hw *hw, u8 index)
{
	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
	unsigned long flags;
	u32 v;
	int err;

	/* translate framework index to hardware ROSEL value, if mapped */
	if (refo->parent_map)
		index = refo->parent_map[index];

	/* wait until ACTIVE bit is zero or timeout */
	err = readl_poll_timeout(refo->ctrl_reg, v, !(v & REFO_ACTIVE),
				 1, LOCK_TIMEOUT_US);
	if (err) {
		pr_err("%s: poll failed, clk active\n", clk_hw_get_name(hw));
		return err;
	}

	spin_lock_irqsave(&refo->core->reg_lock, flags);

	pic32_syskey_unlock();

	/* calculate & apply new */
	v = readl(refo->ctrl_reg);
	v &= ~(REFO_SEL_MASK << REFO_SEL_SHIFT);
	v |= index << REFO_SEL_SHIFT;

	writel(v, refo->ctrl_reg);

	spin_unlock_irqrestore(&refo->core->reg_lock, flags);

	return 0;
}
475 
/*
 * Reprogram source, divider and trim of a reference oscillator in one
 * locked sequence: wait for any in-flight switch to finish, program
 * ROSEL + RODIV, program ROTRIM, then pulse REFO_DIVSW_EN so the
 * hardware switches to the new divider glitchlessly.
 */
static int roclk_set_rate_and_parent(struct clk_hw *hw,
				     unsigned long rate,
				     unsigned long parent_rate,
				     u8 index)
{
	struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
	unsigned long flags;
	u32 trim, rodiv, v;
	int err;

	/* calculate new rodiv & rotrim for new rate */
	roclk_calc_div_trim(rate, parent_rate, &rodiv, &trim);

	pr_debug("parent_rate = %lu, rate = %lu, div = %d, trim = %d\n",
		 parent_rate, rate, rodiv, trim);

	/* wait till source change is active */
	err = readl_poll_timeout(refo->ctrl_reg, v,
				 !(v & (REFO_ACTIVE | REFO_DIVSW_EN)),
				 1, LOCK_TIMEOUT_US);
	if (err) {
		pr_err("%s: poll timedout, clock is still active\n", __func__);
		return err;
	}

	spin_lock_irqsave(&refo->core->reg_lock, flags);
	v = readl(refo->ctrl_reg);

	pic32_syskey_unlock();

	/* apply parent, if required */
	if (refo->parent_map)
		index = refo->parent_map[index];

	v &= ~(REFO_SEL_MASK << REFO_SEL_SHIFT);
	v |= index << REFO_SEL_SHIFT;

	/* apply RODIV */
	v &= ~(REFO_DIV_MASK << REFO_DIV_SHIFT);
	v |= rodiv << REFO_DIV_SHIFT;
	writel(v, refo->ctrl_reg);

	/* apply ROTRIM */
	v = readl(refo->ctrl_reg + REFO_TRIM_REG);
	v &= ~(REFO_TRIM_MASK << REFO_TRIM_SHIFT);
	v |= trim << REFO_TRIM_SHIFT;
	writel(v, refo->ctrl_reg + REFO_TRIM_REG);

	/* enable & activate divider switching */
	writel(REFO_ON | REFO_DIVSW_EN, PIC32_SET(refo->ctrl_reg));

	/* wait till divswen is in-progress */
	err = readl_poll_timeout_atomic(refo->ctrl_reg, v, !(v & REFO_DIVSW_EN),
					1, LOCK_TIMEOUT_US);
	/*
	 * leave the clk gated as it was
	 * NOTE(review): REFO_ON is cleared unconditionally here, so a clock
	 * that was running before ends up gated - confirm intended.
	 */
	writel(REFO_ON, PIC32_CLR(refo->ctrl_reg));

	spin_unlock_irqrestore(&refo->core->reg_lock, flags);

	return err;
}
537 
/* Change the rate while keeping the currently selected parent. */
static int roclk_set_rate(struct clk_hw *hw, unsigned long rate,
			  unsigned long parent_rate)
{
	return roclk_set_rate_and_parent(hw, rate, parent_rate,
					 roclk_get_parent(hw));
}
545 
/* Reference oscillator operations: gated mux + fractional divider. */
const struct clk_ops pic32_roclk_ops = {
	.enable			= roclk_enable,
	.disable		= roclk_disable,
	.is_enabled		= roclk_is_enabled,
	.get_parent		= roclk_get_parent,
	.set_parent		= roclk_set_parent,
	.determine_rate		= roclk_determine_rate,
	.recalc_rate		= roclk_recalc_rate,
	.set_rate_and_parent	= roclk_set_rate_and_parent,
	.set_rate		= roclk_set_rate,
	.init			= roclk_init,
};
558 
559 struct clk *pic32_refo_clk_register(const struct pic32_ref_osc_data *data,
560 				    struct pic32_clk_common *core)
561 {
562 	struct pic32_ref_osc *refo;
563 	struct clk *clk;
564 
565 	refo = devm_kzalloc(core->dev, sizeof(*refo), GFP_KERNEL);
566 	if (!refo)
567 		return ERR_PTR(-ENOMEM);
568 
569 	refo->core = core;
570 	refo->hw.init = &data->init_data;
571 	refo->ctrl_reg = data->ctrl_reg + core->iobase;
572 	refo->parent_map = data->parent_map;
573 
574 	clk = devm_clk_register(core->dev, &refo->hw);
575 	if (IS_ERR(clk))
576 		dev_err(core->dev, "%s: clk_register() failed\n", __func__);
577 
578 	return clk;
579 }
580 
/* System PLL (SPLL) */
struct pic32_sys_pll {
	struct clk_hw hw;		/* clk framework handle */
	void __iomem *ctrl_reg;		/* SPLLCON register */
	void __iomem *status_reg;	/* register holding the lock bit */
	u32 lock_mask;			/* PLL-locked bit in status_reg */
	u32 idiv; /* PLL iclk divider, treated fixed */
	struct pic32_clk_common *core;	/* shared iobase/reg_lock/device */
};

/* map clk framework handle back to our driver data */
#define clkhw_to_spll(_hw)	container_of(_hw, struct pic32_sys_pll, hw)
591 
592 static inline u32 spll_odiv_to_divider(u32 odiv)
593 {
594 	odiv = clamp_val(odiv, PLL_ODIV_MIN, PLL_ODIV_MAX);
595 
596 	return 1 << odiv;
597 }
598 
599 static unsigned long spll_calc_mult_div(struct pic32_sys_pll *pll,
600 					unsigned long rate,
601 					unsigned long parent_rate,
602 					u32 *mult_p, u32 *odiv_p)
603 {
604 	u32 mul, div, best_mul = 1, best_div = 1;
605 	unsigned long new_rate, best_rate = rate;
606 	unsigned int best_delta = -1, delta, match_found = 0;
607 	u64 rate64;
608 
609 	parent_rate /= pll->idiv;
610 
611 	for (mul = 1; mul <= PLL_MULT_MAX; mul++) {
612 		for (div = PLL_ODIV_MIN; div <= PLL_ODIV_MAX; div++) {
613 			rate64 = parent_rate;
614 			rate64 *= mul;
615 			do_div(rate64, 1 << div);
616 			new_rate = rate64;
617 			delta = abs(rate - new_rate);
618 			if ((new_rate >= rate) && (delta < best_delta)) {
619 				best_delta = delta;
620 				best_rate = new_rate;
621 				best_mul = mul;
622 				best_div = div;
623 				match_found = 1;
624 			}
625 		}
626 	}
627 
628 	if (!match_found) {
629 		pr_warn("spll: no match found\n");
630 		return 0;
631 	}
632 
633 	pr_debug("rate %lu, par_rate %lu/mult %u, div %u, best_rate %lu\n",
634 		 rate, parent_rate, best_mul, best_div, best_rate);
635 
636 	if (mult_p)
637 		*mult_p = best_mul - 1;
638 
639 	if (odiv_p)
640 		*odiv_p = best_div;
641 
642 	return best_rate;
643 }
644 
645 static unsigned long spll_clk_recalc_rate(struct clk_hw *hw,
646 					  unsigned long parent_rate)
647 {
648 	struct pic32_sys_pll *pll = clkhw_to_spll(hw);
649 	unsigned long pll_in_rate;
650 	u32 mult, odiv, div, v;
651 	u64 rate64;
652 
653 	v = readl(pll->ctrl_reg);
654 	odiv = ((v >> PLL_ODIV_SHIFT) & PLL_ODIV_MASK);
655 	mult = ((v >> PLL_MULT_SHIFT) & PLL_MULT_MASK) + 1;
656 	div = spll_odiv_to_divider(odiv);
657 
658 	/* pll_in_rate = parent_rate / idiv
659 	 * pll_out_rate = pll_in_rate * mult / div;
660 	 */
661 	pll_in_rate = parent_rate / pll->idiv;
662 	rate64 = pll_in_rate;
663 	rate64 *= mult;
664 	do_div(rate64, div);
665 
666 	return rate64;
667 }
668 
669 static int spll_clk_determine_rate(struct clk_hw *hw,
670 				   struct clk_rate_request *req)
671 {
672 	struct pic32_sys_pll *pll = clkhw_to_spll(hw);
673 
674 	req->rate = spll_calc_mult_div(pll, req->rate, req->best_parent_rate,
675 				       NULL, NULL);
676 
677 	return 0;
678 }
679 
/*
 * Reprogram the system PLL multiplier and output divider.
 *
 * Refuses with -EBUSY while the SPLL is feeding SYSCLK, because its
 * counters must not change during active use.  After the (syskey
 * unlocked) write, busy-waits up to 100us for the PLL lock bit.
 */
static int spll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct pic32_sys_pll *pll = clkhw_to_spll(hw);
	unsigned long ret, flags;
	u32 mult, odiv, v;
	int err;

	ret = spll_calc_mult_div(pll, rate, parent_rate, &mult, &odiv);
	if (!ret)
		return -EINVAL;

	/*
	 * We can't change SPLL counters when it is in-active use
	 * by SYSCLK. So check before applying new counters/rate.
	 */

	/* Is spll_clk active parent of sys_clk ? */
	if (unlikely(clk_hw_get_parent(pic32_sclk_hw) == hw)) {
		pr_err("%s: failed, clk in-use\n", __func__);
		return -EBUSY;
	}

	spin_lock_irqsave(&pll->core->reg_lock, flags);

	/* apply new multiplier & divisor */
	v = readl(pll->ctrl_reg);
	v &= ~(PLL_MULT_MASK << PLL_MULT_SHIFT);
	v &= ~(PLL_ODIV_MASK << PLL_ODIV_SHIFT);
	v |= (mult << PLL_MULT_SHIFT) | (odiv << PLL_ODIV_SHIFT);

	/* sys unlock before write */
	pic32_syskey_unlock();

	writel(v, pll->ctrl_reg);
	cpu_relax();

	/* insert few nops (5-stage) to ensure CPU does not hang */
	cpu_nop5();
	cpu_nop5();

	/* Wait until PLL is locked (maximum 100 usecs). */
	err = readl_poll_timeout_atomic(pll->status_reg, v,
					v & pll->lock_mask, 1, 100);
	spin_unlock_irqrestore(&pll->core->reg_lock, flags);

	return err;
}
728 
/* SPLL clock operation (no gate; rate control only) */
const struct clk_ops pic32_spll_ops = {
	.recalc_rate	= spll_clk_recalc_rate,
	.determine_rate = spll_clk_determine_rate,
	.set_rate	= spll_clk_set_rate,
};
735 
736 struct clk *pic32_spll_clk_register(const struct pic32_sys_pll_data *data,
737 				    struct pic32_clk_common *core)
738 {
739 	struct pic32_sys_pll *spll;
740 	struct clk *clk;
741 
742 	spll = devm_kzalloc(core->dev, sizeof(*spll), GFP_KERNEL);
743 	if (!spll)
744 		return ERR_PTR(-ENOMEM);
745 
746 	spll->core = core;
747 	spll->hw.init = &data->init_data;
748 	spll->ctrl_reg = data->ctrl_reg + core->iobase;
749 	spll->status_reg = data->status_reg + core->iobase;
750 	spll->lock_mask = data->lock_mask;
751 
752 	/* cache PLL idiv; PLL driver uses it as constant.*/
753 	spll->idiv = (readl(spll->ctrl_reg) >> PLL_IDIV_SHIFT) & PLL_IDIV_MASK;
754 	spll->idiv += 1;
755 
756 	clk = devm_clk_register(core->dev, &spll->hw);
757 	if (IS_ERR(clk))
758 		dev_err(core->dev, "sys_pll: clk_register() failed\n");
759 
760 	return clk;
761 }
762 
/* System mux clock (aka SCLK) */

struct pic32_sys_clk {
	struct clk_hw hw;		/* clk framework handle */
	void __iomem *mux_reg;		/* OSCCON: source mux + switch ctrl */
	void __iomem *slew_reg;		/* slew control + SYSDIV post-divider */
	u32 slew_div;			/* slew step divider; 0 = disabled */
	const u32 *parent_map;		/* optional index -> NOSC mapping */
	struct pic32_clk_common *core;	/* shared iobase/reg_lock/device */
};

/* map clk framework handle back to our driver data */
#define clkhw_to_sys_clk(_hw)	container_of(_hw, struct pic32_sys_clk, hw)
775 
776 static unsigned long sclk_get_rate(struct clk_hw *hw, unsigned long parent_rate)
777 {
778 	struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
779 	u32 div;
780 
781 	div = (readl(sclk->slew_reg) >> SLEW_SYSDIV_SHIFT) & SLEW_SYSDIV;
782 	div += 1; /* sys-div to divider */
783 
784 	return parent_rate / div;
785 }
786 
787 static int sclk_determine_rate(struct clk_hw *hw,
788 			       struct clk_rate_request *req)
789 {
790 	req->rate = calc_best_divided_rate(req->rate, req->best_parent_rate,
791 					   SLEW_SYSDIV, 1);
792 
793 	return 0;
794 }
795 
/*
 * Apply a new SYSCLK post-divider through the slew register.
 *
 * Writes (div - 1) into SYSDIV under the register lock and busy-waits
 * for the slew engine to finish (SLEW_BUSY clear).  Assumes @rate was
 * pre-rounded by sclk_determine_rate() so the truncating division below
 * yields the intended divider.
 */
static int sclk_set_rate(struct clk_hw *hw,
			 unsigned long rate, unsigned long parent_rate)
{
	struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
	unsigned long flags;
	u32 v, div;
	int err;

	div = parent_rate / rate;

	spin_lock_irqsave(&sclk->core->reg_lock, flags);

	/* apply new div; field encodes (divider - 1) */
	v = readl(sclk->slew_reg);
	v &= ~(SLEW_SYSDIV << SLEW_SYSDIV_SHIFT);
	v |= (div - 1) << SLEW_SYSDIV_SHIFT;

	/* unlock write-protected clock registers */
	pic32_syskey_unlock();

	writel(v, sclk->slew_reg);

	/* wait until BUSY is cleared */
	err = readl_poll_timeout_atomic(sclk->slew_reg, v,
					!(v & SLEW_BUSY), 1, LOCK_TIMEOUT_US);

	spin_unlock_irqrestore(&sclk->core->reg_lock, flags);

	return err;
}
825 
/*
 * Return the index of the currently active SYSCLK source (COSC field),
 * translated through parent_map when one is provided.
 */
static u8 sclk_get_parent(struct clk_hw *hw)
{
	struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
	u32 i, v;

	/* read current oscillator selection */
	v = (readl(sclk->mux_reg) >> OSC_CUR_SHIFT) & OSC_CUR_MASK;

	/* no translation table: hardware value is the parent index */
	if (!sclk->parent_map)
		return v;

	for (i = 0; i < clk_hw_get_num_parents(hw); i++)
		if (sclk->parent_map[i] == v)
			return i;
	/*
	 * NOTE(review): return type is u8, so -EINVAL is truncated to 0xea;
	 * callers only see an out-of-range index here - confirm intended.
	 */
	return -EINVAL;
}
841 
/*
 * Switch SYSCLK to a new oscillator source.
 *
 * Programs the NOSC field, triggers the switch via OSC_SWEN, waits for
 * it to complete, and finally verifies that COSC reflects the requested
 * source - the hardware may reject a switch if the new source is absent
 * or unstable.
 */
static int sclk_set_parent(struct clk_hw *hw, u8 index)
{
	struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
	unsigned long flags;
	u32 nosc, cosc, v;
	int err;

	spin_lock_irqsave(&sclk->core->reg_lock, flags);

	/* find new_osc */
	nosc = sclk->parent_map ? sclk->parent_map[index] : index;

	/* set new parent */
	v = readl(sclk->mux_reg);
	v &= ~(OSC_NEW_MASK << OSC_NEW_SHIFT);
	v |= nosc << OSC_NEW_SHIFT;

	pic32_syskey_unlock();

	writel(v, sclk->mux_reg);

	/* initate switch */
	writel(OSC_SWEN, PIC32_SET(sclk->mux_reg));
	cpu_relax();

	/* add nop to flush pipeline (as cpu_clk is in-flux) */
	cpu_nop5();

	/*
	 * wait for SWEN bit to clear
	 * NOTE(review): OSC_SWEN is an OSCCON (mux_reg) bit, yet this polls
	 * slew_reg - looks like it should poll mux_reg; confirm against the
	 * reference manual before changing.
	 */
	err = readl_poll_timeout_atomic(sclk->slew_reg, v,
					!(v & OSC_SWEN), 1, LOCK_TIMEOUT_US);

	spin_unlock_irqrestore(&sclk->core->reg_lock, flags);

	/*
	 * SCLK clock-switching logic might reject a clock switching request
	 * if pre-requisites (like new clk_src not present or unstable) are
	 * not met.
	 * So confirm before claiming success.
	 */
	cosc = (readl(sclk->mux_reg) >> OSC_CUR_SHIFT) & OSC_CUR_MASK;
	if (cosc != nosc) {
		pr_err("%s: err, failed to set_parent() to %d, current %d\n",
		       clk_hw_get_name(hw), nosc, cosc);
		err = -EBUSY;
	}

	return err;
}
891 
/*
 * .init callback: publish this hw for spll_clk_set_rate()'s in-use check
 * and, when configured, program the slew divider for gradual frequency
 * transitions in both directions.
 */
static int sclk_init(struct clk_hw *hw)
{
	struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
	unsigned long flags;
	u32 v;

	/* Maintain reference to this clk, required in spll_clk_set_rate() */
	pic32_sclk_hw = hw;

	/* apply slew divider on both up and down scaling */
	if (sclk->slew_div) {
		spin_lock_irqsave(&sclk->core->reg_lock, flags);
		v = readl(sclk->slew_reg);
		v &= ~(SLEW_DIV << SLEW_DIV_SHIFT);
		v |= sclk->slew_div << SLEW_DIV_SHIFT;
		v |= SLEW_DOWNEN | SLEW_UPEN;
		writel(v, sclk->slew_reg);
		spin_unlock_irqrestore(&sclk->core->reg_lock, flags);
	}

	return 0;
}
914 
915 /* sclk with post-divider */
916 const struct clk_ops pic32_sclk_ops = {
917 	.get_parent	= sclk_get_parent,
918 	.set_parent	= sclk_set_parent,
919 	.determine_rate = sclk_determine_rate,
920 	.set_rate	= sclk_set_rate,
921 	.recalc_rate	= sclk_get_rate,
922 	.init		= sclk_init,
923 	.determine_rate = __clk_mux_determine_rate,
924 };
925 
/* sclk with no slew and no post-divider; rate follows the chosen parent */
const struct clk_ops pic32_sclk_no_div_ops = {
	.get_parent	= sclk_get_parent,
	.set_parent	= sclk_set_parent,
	.init		= sclk_init,
	.determine_rate = __clk_mux_determine_rate,
};
933 
934 struct clk *pic32_sys_clk_register(const struct pic32_sys_clk_data *data,
935 				   struct pic32_clk_common *core)
936 {
937 	struct pic32_sys_clk *sclk;
938 	struct clk *clk;
939 
940 	sclk = devm_kzalloc(core->dev, sizeof(*sclk), GFP_KERNEL);
941 	if (!sclk)
942 		return ERR_PTR(-ENOMEM);
943 
944 	sclk->core = core;
945 	sclk->hw.init = &data->init_data;
946 	sclk->mux_reg = data->mux_reg + core->iobase;
947 	sclk->slew_reg = data->slew_reg + core->iobase;
948 	sclk->slew_div = data->slew_div;
949 	sclk->parent_map = data->parent_map;
950 
951 	clk = devm_clk_register(core->dev, &sclk->hw);
952 	if (IS_ERR(clk))
953 		dev_err(core->dev, "%s: clk register failed\n", __func__);
954 
955 	return clk;
956 }
957 
/* secondary oscillator */
struct pic32_sec_osc {
	struct clk_hw hw;		/* clk framework handle */
	void __iomem *enable_reg;	/* register holding the enable bit */
	void __iomem *status_reg;	/* register holding the ready bit */
	u32 enable_mask;		/* enable bit in enable_reg */
	u32 status_mask;		/* ready bit in status_reg */
	unsigned long fixed_rate;	/* crystal frequency in Hz */
	struct pic32_clk_common *core;	/* shared iobase/reg_lock/device */
};

/* map clk framework handle back to our driver data */
#define clkhw_to_sosc(_hw)	container_of(_hw, struct pic32_sec_osc, hw)
970 static int sosc_clk_enable(struct clk_hw *hw)
971 {
972 	struct pic32_sec_osc *sosc = clkhw_to_sosc(hw);
973 	u32 v;
974 
975 	/* enable SOSC */
976 	pic32_syskey_unlock();
977 	writel(sosc->enable_mask, PIC32_SET(sosc->enable_reg));
978 
979 	/* wait till warm-up period expires or ready-status is updated */
980 	return readl_poll_timeout_atomic(sosc->status_reg, v,
981 					 v & sosc->status_mask, 1, 100);
982 }
983 
984 static void sosc_clk_disable(struct clk_hw *hw)
985 {
986 	struct pic32_sec_osc *sosc = clkhw_to_sosc(hw);
987 
988 	pic32_syskey_unlock();
989 	writel(sosc->enable_mask, PIC32_CLR(sosc->enable_reg));
990 }
991 
992 static int sosc_clk_is_enabled(struct clk_hw *hw)
993 {
994 	struct pic32_sec_osc *sosc = clkhw_to_sosc(hw);
995 	u32 enabled, ready;
996 
997 	/* check enabled and ready status */
998 	enabled = readl(sosc->enable_reg) & sosc->enable_mask;
999 	ready = readl(sosc->status_reg) & sosc->status_mask;
1000 
1001 	return enabled && ready;
1002 }
1003 
1004 static unsigned long sosc_clk_calc_rate(struct clk_hw *hw,
1005 					unsigned long parent_rate)
1006 {
1007 	return clkhw_to_sosc(hw)->fixed_rate;
1008 }
1009 
/* Secondary oscillator operations: fixed-rate gate. */
const struct clk_ops pic32_sosc_ops = {
	.enable = sosc_clk_enable,
	.disable = sosc_clk_disable,
	.is_enabled = sosc_clk_is_enabled,
	.recalc_rate = sosc_clk_calc_rate,
};
1016 
1017 struct clk *pic32_sosc_clk_register(const struct pic32_sec_osc_data *data,
1018 				    struct pic32_clk_common *core)
1019 {
1020 	struct pic32_sec_osc *sosc;
1021 
1022 	sosc = devm_kzalloc(core->dev, sizeof(*sosc), GFP_KERNEL);
1023 	if (!sosc)
1024 		return ERR_PTR(-ENOMEM);
1025 
1026 	sosc->core = core;
1027 	sosc->hw.init = &data->init_data;
1028 	sosc->fixed_rate = data->fixed_rate;
1029 	sosc->enable_mask = data->enable_mask;
1030 	sosc->status_mask = data->status_mask;
1031 	sosc->enable_reg = data->enable_reg + core->iobase;
1032 	sosc->status_reg = data->status_reg + core->iobase;
1033 
1034 	return devm_clk_register(core->dev, &sosc->hw);
1035 }
1036