// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
 *
 * Adjustable divider clock implementation
 */

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/log2.h>

/*
 * DOC: basic adjustable divider clock that cannot gate
 *
 * Traits of this clock:
 * prepare - clk_prepare only ensures that parents are prepared
 * enable - clk_enable only ensures that parents are enabled
 * rate - rate is adjustable.  clk->rate = ceiling(parent->rate / divisor)
 * parent - fixed parent.  No clk_set_parent support
 */

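/*
 * Register accessors that honour CLK_DIVIDER_BIG_ENDIAN, so the same divider
 * code works for both little- and big-endian register blocks.
 */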
static inline u32 clk_div_readl(struct clk_divider *divider)
{
	if (divider->flags & CLK_DIVIDER_BIG_ENDIAN)
		return ioread32be(divider->reg);

	return readl(divider->reg);
}

static inline void clk_div_writel(struct clk_divider *divider, u32 val)
{
	if (divider->flags & CLK_DIVIDER_BIG_ENDIAN)
		iowrite32be(val, divider->reg);
	else
		writel(val, divider->reg);
}

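/*
 * Return the largest divider in the table whose register value still fits
 * into a field of the given width.
 */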
static unsigned int _get_table_maxdiv(const struct clk_div_table *table,
				      u8 width)
{
	unsigned int maxdiv = 0, mask = clk_div_mask(width);
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div > maxdiv && clkt->val <= mask)
			maxdiv = clkt->div;
	return maxdiv;
}

static unsigned int _get_table_mindiv(const struct clk_div_table *table)
{
	unsigned int mindiv = UINT_MAX;
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div < mindiv)
			mindiv = clkt->div;
	return mindiv;
}

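/*
 * Return the maximum divider this clock supports, taking the one-based,
 * power-of-two and table-based variants into account.
 */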
static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width,
				unsigned long flags)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return clk_div_mask(width);
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return 1 << clk_div_mask(width);
	if (table)
		return _get_table_maxdiv(table, width);
	return clk_div_mask(width) + 1;
}

static unsigned int _get_table_div(const struct clk_div_table *table,
							unsigned int val)
{
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->val == val)
			return clkt->div;
	return 0;
}

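/* Translate a register field value into the divider it represents. */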
static unsigned int _get_div(const struct clk_div_table *table,
			     unsigned int val, unsigned long flags, u8 width)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return val;
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return 1 << val;
	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
		return val ? val : clk_div_mask(width) + 1;
	if (table)
		return _get_table_div(table, val);
	return val + 1;
}

static unsigned int _get_table_val(const struct clk_div_table *table,
							unsigned int div)
{
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div == div)
			return clkt->val;
	return 0;
}

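/* Translate a divider into the register field value that selects it. */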
static unsigned int _get_val(const struct clk_div_table *table,
			     unsigned int div, unsigned long flags, u8 width)
{
	if (flags & CLK_DIVIDER_ONE_BASED)
		return div;
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return __ffs(div);
	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
		return (div == clk_div_mask(width) + 1) ? 0 : div;
	if (table)
		return _get_table_val(table, div);
	return div - 1;
}

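/*
 * Helper shared with drivers that embed a divider: decode the register value
 * and divide the parent rate by it.  A zero divider is only legal when
 * CLK_DIVIDER_ALLOW_ZERO is set, in which case the parent rate is passed
 * through unchanged.
 */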
unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
				  unsigned int val,
				  const struct clk_div_table *table,
				  unsigned long flags, unsigned long width)
{
	unsigned int div;

	div = _get_div(table, val, flags, width);
	if (!div) {
		WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO),
			"%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
			clk_hw_get_name(hw));
		return parent_rate;
	}

	return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}
EXPORT_SYMBOL_GPL(divider_recalc_rate);

static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int val;

	val = clk_div_readl(divider) >> divider->shift;
	val &= clk_div_mask(divider->width);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

static bool _is_valid_table_div(const struct clk_div_table *table,
							 unsigned int div)
{
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->div == div)
			return true;
	return false;
}

static bool _is_valid_div(const struct clk_div_table *table, unsigned int div,
			  unsigned long flags)
{
	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return is_power_of_2(div);
	if (table)
		return _is_valid_table_div(table, div);
	return true;
}

static int _round_up_table(const struct clk_div_table *table, int div)
{
	const struct clk_div_table *clkt;
	int up = INT_MAX;

	for (clkt = table; clkt->div; clkt++) {
		if (clkt->div == div)
			return clkt->div;
		else if (clkt->div < div)
			continue;

		if ((clkt->div - div) < (up - div))
			up = clkt->div;
	}

	return up;
}

static int _round_down_table(const struct clk_div_table *table, int div)
{
	const struct clk_div_table *clkt;
	int down = _get_table_mindiv(table);

	for (clkt = table; clkt->div; clkt++) {
		if (clkt->div == div)
			return clkt->div;
		else if (clkt->div > div)
			continue;

		if ((div - clkt->div) < (div - down))
			down = clkt->div;
	}

	return down;
}

static int _div_round_up(const struct clk_div_table *table,
			 unsigned long parent_rate, unsigned long rate,
			 unsigned long flags)
{
	int div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);

	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		div = __roundup_pow_of_two(div);
	if (table)
		div = _round_up_table(table, div);

	return div;
}

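/*
 * Pick whichever of the next-higher or next-lower valid divider gives a rate
 * closest to the one requested.
 */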
static int _div_round_closest(const struct clk_div_table *table,
			      unsigned long parent_rate, unsigned long rate,
			      unsigned long flags)
{
	int up, down;
	unsigned long up_rate, down_rate;

	up = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
	down = parent_rate / rate;

	if (flags & CLK_DIVIDER_POWER_OF_TWO) {
		up = __roundup_pow_of_two(up);
		down = __rounddown_pow_of_two(down);
	} else if (table) {
		up = _round_up_table(table, up);
		down = _round_down_table(table, down);
	}

	up_rate = DIV_ROUND_UP_ULL((u64)parent_rate, up);
	down_rate = DIV_ROUND_UP_ULL((u64)parent_rate, down);

	return (rate - up_rate) <= (down_rate - rate) ? up : down;
}

static int _div_round(const struct clk_div_table *table,
		      unsigned long parent_rate, unsigned long rate,
		      unsigned long flags)
{
	if (flags & CLK_DIVIDER_ROUND_CLOSEST)
		return _div_round_closest(table, parent_rate, rate, flags);

	return _div_round_up(table, parent_rate, rate, flags);
}

static bool _is_best_div(unsigned long rate, unsigned long now,
			 unsigned long best, unsigned long flags)
{
	if (flags & CLK_DIVIDER_ROUND_CLOSEST)
		return abs(rate - now) < abs(rate - best);

	return now <= rate && now > best;
}

static int _next_div(const struct clk_div_table *table, int div,
		     unsigned long flags)
{
	div++;

	if (flags & CLK_DIVIDER_POWER_OF_TWO)
		return __roundup_pow_of_two(div);
	if (table)
		return _round_up_table(table, div);

	return div;
}

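/*
 * Find the divider that best approximates the requested rate.  Without
 * CLK_SET_RATE_PARENT the parent rate is fixed and the divider is simply
 * rounded and clamped; with it, each candidate divider is tried against the
 * rates the parent can actually provide.
 */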
static int clk_divider_bestdiv(struct clk_hw *hw, struct clk_hw *parent,
			       unsigned long rate,
			       unsigned long *best_parent_rate,
			       const struct clk_div_table *table, u8 width,
			       unsigned long flags)
{
	int i, bestdiv = 0;
	unsigned long parent_rate, best = 0, now, maxdiv;
	unsigned long parent_rate_saved = *best_parent_rate;

	if (!rate)
		rate = 1;

	maxdiv = _get_maxdiv(table, width, flags);

	if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
		parent_rate = *best_parent_rate;
		bestdiv = _div_round(table, parent_rate, rate, flags);
		bestdiv = bestdiv == 0 ? 1 : bestdiv;
		bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
		return bestdiv;
	}

	/*
	 * The maximum divider we can use without overflowing
	 * unsigned long in rate * i below
	 */
	maxdiv = min(ULONG_MAX / rate, maxdiv);

	for (i = _next_div(table, 0, flags); i <= maxdiv;
					     i = _next_div(table, i, flags)) {
		if (rate * i == parent_rate_saved) {
			/*
			 * It's the most ideal case if the requested rate can be
			 * divided from parent clock without needing to change
			 * parent rate, so return the divider immediately.
			 */
			*best_parent_rate = parent_rate_saved;
			return i;
		}
		parent_rate = clk_hw_round_rate(parent, rate * i);
		now = DIV_ROUND_UP_ULL((u64)parent_rate, i);
		if (_is_best_div(rate, now, best, flags)) {
			bestdiv = i;
			best = now;
			*best_parent_rate = parent_rate;
		}
	}

	if (!bestdiv) {
		bestdiv = _get_maxdiv(table, width, flags);
		*best_parent_rate = clk_hw_round_rate(parent, 1);
	}

	return bestdiv;
}

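/*
 * Round a rate request against an explicitly supplied parent; exported so
 * other divider-style clock implementations can reuse the rounding logic.
 */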
long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
			       unsigned long rate, unsigned long *prate,
			       const struct clk_div_table *table,
			       u8 width, unsigned long flags)
{
	int div;

	div = clk_divider_bestdiv(hw, parent, rate, prate, table, width, flags);

	return DIV_ROUND_UP_ULL((u64)*prate, div);
}
EXPORT_SYMBOL_GPL(divider_round_rate_parent);

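/*
 * Rounding helper for read-only dividers: the divider value is fixed in
 * hardware, but a rate change may still be propagated to the parent.
 */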
long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
				  unsigned long rate, unsigned long *prate,
				  const struct clk_div_table *table, u8 width,
				  unsigned long flags, unsigned int val)
{
	int div;

	div = _get_div(table, val, flags, width);

	/* Even a read-only clock can propagate a rate change */
	if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
		if (!parent)
			return -EINVAL;

		*prate = clk_hw_round_rate(parent, rate * div);
	}

	return DIV_ROUND_UP_ULL((u64)*prate, div);
}
EXPORT_SYMBOL_GPL(divider_ro_round_rate_parent);

static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *prate)
{
	struct clk_divider *divider = to_clk_divider(hw);

	/* if read only, just return current value */
	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
		u32 val;

		val = clk_div_readl(divider) >> divider->shift;
		val &= clk_div_mask(divider->width);

		return divider_ro_round_rate(hw, rate, prate, divider->table,
					     divider->width, divider->flags,
					     val);
	}

	return divider_round_rate(hw, rate, prate, divider->table,
				  divider->width, divider->flags);
}

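/*
 * Convert a target rate and parent rate into the register field value to
 * program, or -EINVAL if no valid divider matches.
 */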
int divider_get_val(unsigned long rate, unsigned long parent_rate,
		    const struct clk_div_table *table, u8 width,
		    unsigned long flags)
{
	unsigned int div, value;

	div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);

	if (!_is_valid_div(table, div, flags))
		return -EINVAL;

	value = _get_val(table, div, flags, width);

	return min_t(unsigned int, value, clk_div_mask(width));
}
EXPORT_SYMBOL_GPL(divider_get_val);

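/*
 * Program the divider field under the optional register lock.  For
 * CLK_DIVIDER_HIWORD_MASK registers the write-enable bits are placed in the
 * upper 16 bits instead of doing a read-modify-write of the field.
 */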
static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	int value;
	unsigned long flags = 0;
	u32 val;

	value = divider_get_val(rate, parent_rate, divider->table,
				divider->width, divider->flags);
	if (value < 0)
		return value;

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);
	else
		__acquire(divider->lock);

	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
		val = clk_div_mask(divider->width) << (divider->shift + 16);
	} else {
		val = clk_div_readl(divider);
		val &= ~(clk_div_mask(divider->width) << divider->shift);
	}
	val |= (u32)value << divider->shift;
	clk_div_writel(divider, val);

	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);
	else
		__release(divider->lock);

	return 0;
}

const struct clk_ops clk_divider_ops = {
	.recalc_rate = clk_divider_recalc_rate,
	.round_rate = clk_divider_round_rate,
	.set_rate = clk_divider_set_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ops);

const struct clk_ops clk_divider_ro_ops = {
	.recalc_rate = clk_divider_recalc_rate,
	.round_rate = clk_divider_round_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ro_ops);

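/*
 * Common backend for the divider registration helpers in
 * <linux/clk-provider.h>.  At most one parent may be described, either by
 * name, by clk_hw pointer or by clk_parent_data.
 */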
struct clk_hw *__clk_hw_register_divider(struct device *dev,
		struct device_node *np, const char *name,
		const char *parent_name, const struct clk_hw *parent_hw,
		const struct clk_parent_data *parent_data, unsigned long flags,
		void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
		const struct clk_div_table *table, spinlock_t *lock)
{
	struct clk_divider *div;
	struct clk_hw *hw;
	struct clk_init_data init = {};
	int ret;

	if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
		if (width + shift > 16) {
			pr_warn("divider value exceeds LOWORD field\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* allocate the divider */
	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
		init.ops = &clk_divider_ro_ops;
	else
		init.ops = &clk_divider_ops;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.parent_hws = parent_hw ? &parent_hw : NULL;
	init.parent_data = parent_data;
	init.num_parents = (parent_name || parent_hw || parent_data) ? 1 : 0;

	/* struct clk_divider assignments */
	div->reg = reg;
	div->shift = shift;
	div->width = width;
	div->flags = clk_divider_flags;
	div->lock = lock;
	div->hw.init = &init;
	div->table = table;

	/* register the clock */
	hw = &div->hw;
	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(div);
		hw = ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(__clk_hw_register_divider);

/**
 * clk_register_divider_table - register a table based divider clock with
 * the clock framework
 * @dev: device registering this clock
 * @name: name of this clock
 * @parent_name: name of clock's parent
 * @flags: framework-specific flags
 * @reg: register address to adjust divider
 * @shift: number of bits to shift the bitfield
 * @width: width of the bitfield
 * @clk_divider_flags: divider-specific flags for this clock
 * @table: array of divider/value pairs ending with a div set to 0
 * @lock: shared register lock for this clock
 */
struct clk *clk_register_divider_table(struct device *dev, const char *name,
		const char *parent_name, unsigned long flags,
		void __iomem *reg, u8 shift, u8 width,
		u8 clk_divider_flags, const struct clk_div_table *table,
		spinlock_t *lock)
{
	struct clk_hw *hw;

	hw = __clk_hw_register_divider(dev, NULL, name, parent_name, NULL,
			NULL, flags, reg, shift, width, clk_divider_flags,
			table, lock);
	if (IS_ERR(hw))
		return ERR_CAST(hw);
	return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_divider_table);

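/**
 * clk_unregister_divider - unregister a clk as a divider
 * @clk: clock to unregister
 */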
void clk_unregister_divider(struct clk *clk)
{
	struct clk_divider *div;
	struct clk_hw *hw;

	hw = __clk_get_hw(clk);
	if (!hw)
		return;

	div = to_clk_divider(hw);

	clk_unregister(clk);
	kfree(div);
}
EXPORT_SYMBOL_GPL(clk_unregister_divider);

/**
 * clk_hw_unregister_divider - unregister a clk divider
 * @hw: hardware-specific clock data to unregister
 */
void clk_hw_unregister_divider(struct clk_hw *hw)
{
	struct clk_divider *div;

	div = to_clk_divider(hw);

	clk_hw_unregister(hw);
	kfree(div);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_divider);

static void devm_clk_hw_release_divider(struct device *dev, void *res)
{
	clk_hw_unregister_divider(*(struct clk_hw **)res);
}

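/*
 * Device-managed variant of __clk_hw_register_divider(): the divider is
 * automatically unregistered and freed when the registering device is
 * unbound.
 */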
struct clk_hw *__devm_clk_hw_register_divider(struct device *dev,
		struct device_node *np, const char *name,
		const char *parent_name, const struct clk_hw *parent_hw,
		const struct clk_parent_data *parent_data, unsigned long flags,
		void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
		const struct clk_div_table *table, spinlock_t *lock)
{
	struct clk_hw **ptr, *hw;

	ptr = devres_alloc(devm_clk_hw_release_divider, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	hw = __clk_hw_register_divider(dev, np, name, parent_name, parent_hw,
				       parent_data, flags, reg, shift, width,
				       clk_divider_flags, table, lock);

	if (!IS_ERR(hw)) {
		*ptr = hw;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(__devm_clk_hw_register_divider);