// SPDX-License-Identifier: GPL-2.0
/*
 * Zynq UltraScale+ MPSoC Divider support
 *
 *  Copyright (C) 2016-2019 Xilinx
 *
 * Adjustable divider clock implementation
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include "clk-zynqmp.h"

/*
 * DOC: basic adjustable divider clock that cannot gate
 *
 * Traits of this clock:
 * prepare - clk_prepare only ensures that parents are prepared
 * enable - clk_enable only ensures that parents are enabled
 * rate - rate is adjustable.  clk->rate = ceiling(parent->rate / divisor)
 * parent - fixed parent.  No clk_set_parent support
 */
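
/*
 * Worked example of the rate trait above, for illustration only: with a
 * 1 GHz parent and a divisor of 3, the reported rate is
 * ceiling(1000000000 / 3) = 333333334 Hz, which is what the
 * DIV_ROUND_UP_ULL() calls below compute.
 */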

#define to_zynqmp_clk_divider(_hw)		\
	container_of(_hw, struct zynqmp_clk_divider, hw)

#define CLK_FRAC		BIT(13) /* has a fractional parent */
#define CUSTOM_FLAG_CLK_FRAC	BIT(0) /* has a fractional parent in custom type flag */

/**
 * struct zynqmp_clk_divider - adjustable divider clock
 * @hw:		handle between common and hardware-specific interfaces
 * @flags:	Hardware specific flags
 * @is_frac:	The divider is a fractional divider
 * @clk_id:	Id of clock
 * @div_type:	divisor type (TYPE_DIV1 or TYPE_DIV2)
 * @max_div:	maximum supported divisor (fetched from firmware)
 */
struct zynqmp_clk_divider {
	struct clk_hw hw;
	u8 flags;
	bool is_frac;
	u32 clk_id;
	u32 div_type;
	u16 max_div;
};

static inline int zynqmp_divider_get_val(unsigned long parent_rate,
					 unsigned long rate, u16 flags)
{
	int up, down;
	unsigned long up_rate, down_rate;

	if (flags & CLK_DIVIDER_POWER_OF_TWO) {
		up = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
		down = DIV_ROUND_DOWN_ULL((u64)parent_rate, rate);

		up = __roundup_pow_of_two(up);
		down = __rounddown_pow_of_two(down);

		up_rate = DIV_ROUND_UP_ULL((u64)parent_rate, up);
		down_rate = DIV_ROUND_UP_ULL((u64)parent_rate, down);

		return (rate - up_rate) <= (down_rate - rate) ? up : down;

	} else {
		return DIV_ROUND_CLOSEST(parent_rate, rate);
	}
}
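
/*
 * Worked example of the power-of-two path above, for illustration: with
 * parent_rate = 1 GHz and rate = 300 MHz, up rounds to 4 and down to 2,
 * giving candidate rates of 250 MHz and 500 MHz; since
 * (300 - 250) <= (500 - 300) MHz, the divisor 4 is returned.
 */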

/**
 * zynqmp_clk_divider_recalc_rate() - Recalc rate of divider clock
 * @hw:			handle between common and hardware-specific interfaces
 * @parent_rate:	rate of parent clock
 *
 * Return: Current rate of the divider clock
 */
static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
						    unsigned long parent_rate)
{
	struct zynqmp_clk_divider *divider = to_zynqmp_clk_divider(hw);
	const char *clk_name = clk_hw_get_name(hw);
	u32 clk_id = divider->clk_id;
	u32 div_type = divider->div_type;
	u32 div, value;
	int ret;

	ret = zynqmp_pm_clock_getdivider(clk_id, &div);

	if (ret)
		pr_debug("%s() get divider failed for %s, ret = %d\n",
			 __func__, clk_name, ret);

	if (div_type == TYPE_DIV1)
		value = div & 0xFFFF;
	else
		value = div >> 16;

	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
		value = 1 << value;

	if (!value) {
		WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
		     "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
		     clk_name);
		return parent_rate;
	}

	return DIV_ROUND_UP_ULL(parent_rate, value);
}
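
/*
 * Note on the divisor word handled above and in
 * zynqmp_clk_divider_set_rate(): the value exchanged with the firmware
 * carries both divider stages, the TYPE_DIV1 divisor in the low 16 bits
 * and the TYPE_DIV2 divisor in the high 16 bits, hence the 0xFFFF mask
 * and the shift by 16.
 */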

static void zynqmp_get_divider2_val(struct clk_hw *hw,
				    unsigned long rate,
				    struct zynqmp_clk_divider *divider,
				    int *bestdiv)
{
	int div1;
	int div2;
	long error = LONG_MAX;
	unsigned long div1_prate;
	struct clk_hw *div1_parent_hw;
	struct clk_hw *div2_parent_hw = clk_hw_get_parent(hw);
	struct zynqmp_clk_divider *pdivider =
				to_zynqmp_clk_divider(div2_parent_hw);

	if (!pdivider)
		return;

	div1_parent_hw = clk_hw_get_parent(div2_parent_hw);
	if (!div1_parent_hw)
		return;

	div1_prate = clk_hw_get_rate(div1_parent_hw);
	*bestdiv = 1;
	for (div1 = 1; div1 <= pdivider->max_div;) {
		for (div2 = 1; div2 <= divider->max_div;) {
			long new_error = ((div1_prate / div1) / div2) - rate;

			if (abs(new_error) < abs(error)) {
				*bestdiv = div2;
				error = new_error;
			}
			if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
				div2 = div2 << 1;
			else
				div2++;
		}
		if (pdivider->flags & CLK_DIVIDER_POWER_OF_TWO)
			div1 = div1 << 1;
		else
			div1++;
	}
}
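
/*
 * Worked example of the search above, for illustration: with the DIV1
 * parent at 1 GHz, a requested rate of 40 MHz and neither stage limited
 * to powers of two, the first exact match visited is div1 = 1, div2 = 25,
 * so *bestdiv becomes 25; later exact matches such as div1 = 5, div2 = 5
 * do not replace it because their error is not strictly smaller.
 */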

/**
 * zynqmp_clk_divider_round_rate() - Round rate of divider clock
 * @hw:			handle between common and hardware-specific interfaces
 * @rate:		rate of clock to be set
 * @prate:		rate of parent clock
 *
 * Return: Rounded clock rate
 */
static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
					  unsigned long rate,
					  unsigned long *prate)
{
	struct zynqmp_clk_divider *divider = to_zynqmp_clk_divider(hw);
	const char *clk_name = clk_hw_get_name(hw);
	u32 clk_id = divider->clk_id;
	u32 div_type = divider->div_type;
	u32 bestdiv;
	int ret;

	/* if read only, just return current value */
	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
		ret = zynqmp_pm_clock_getdivider(clk_id, &bestdiv);

		if (ret)
			pr_debug("%s() get divider failed for %s, ret = %d\n",
				 __func__, clk_name, ret);
		if (div_type == TYPE_DIV1)
			bestdiv = bestdiv & 0xFFFF;
		else
			bestdiv = bestdiv >> 16;

		if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
			bestdiv = 1 << bestdiv;

		return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
	}

	bestdiv = zynqmp_divider_get_val(*prate, rate, divider->flags);

	/*
	 * When there are two cascaded divisors, compute the best pair of
	 * divider values and return the DIV2 value from that computation;
	 * DIV1 will be set to its optimum automatically, based on the
	 * required total division.
	 */
	if (div_type == TYPE_DIV2 &&
	    (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
		zynqmp_get_divider2_val(hw, rate, divider, &bestdiv);
	}

	if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
		bestdiv = rate % *prate ? 1 : bestdiv;

	bestdiv = min_t(u32, bestdiv, divider->max_div);
	*prate = rate * bestdiv;

	return rate;
}
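
/*
 * For illustration: if the search above settles on bestdiv = 25 for a
 * 40 MHz request, *prate is rewritten to 40 MHz * 25 = 1 GHz, so with
 * CLK_SET_RATE_PARENT the upstream divider (or PLL) is asked to provide
 * the matching parent rate.
 */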

/**
 * zynqmp_clk_divider_set_rate() - Set rate of divider clock
 * @hw:			handle between common and hardware-specific interfaces
 * @rate:		rate of clock to be set
 * @parent_rate:	rate of parent clock
 *
 * Return: 0 on success else error+reason
 */
static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
				       unsigned long parent_rate)
{
	struct zynqmp_clk_divider *divider = to_zynqmp_clk_divider(hw);
	const char *clk_name = clk_hw_get_name(hw);
	u32 clk_id = divider->clk_id;
	u32 div_type = divider->div_type;
	u32 value, div;
	int ret;

	value = zynqmp_divider_get_val(parent_rate, rate, divider->flags);
	if (div_type == TYPE_DIV1) {
		div = value & 0xFFFF;
		div |= 0xffff << 16;
	} else {
		div = 0xffff;
		div |= value << 16;
	}

	if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
		div = __ffs(div);

	ret = zynqmp_pm_clock_setdivider(clk_id, div);

	if (ret)
		pr_debug("%s() set divider failed for %s, ret = %d\n",
			 __func__, clk_name, ret);

	return ret;
}
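
/*
 * Packing example for the set path above, for illustration: a TYPE_DIV1
 * value of 4 is sent as 0xFFFF0004 and a TYPE_DIV2 value of 4 as
 * 0x0004FFFF; the all-ones half word presumably tells the firmware to
 * leave the other divider stage untouched.
 */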

static const struct clk_ops zynqmp_clk_divider_ops = {
	.recalc_rate = zynqmp_clk_divider_recalc_rate,
	.round_rate = zynqmp_clk_divider_round_rate,
	.set_rate = zynqmp_clk_divider_set_rate,
};

static const struct clk_ops zynqmp_clk_divider_ro_ops = {
	.recalc_rate = zynqmp_clk_divider_recalc_rate,
	.round_rate = zynqmp_clk_divider_round_rate,
};

/**
 * zynqmp_clk_get_max_divisor() - Get maximum supported divisor from firmware.
 * @clk_id:		Id of clock
 * @type:		Divider type
 *
 * Return: Maximum divisor of a clock if the query succeeds,
 *	   U16_MAX if the query fails
 */
static u32 zynqmp_clk_get_max_divisor(u32 clk_id, u32 type)
{
	struct zynqmp_pm_query_data qdata = {0};
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	qdata.qid = PM_QID_CLOCK_GET_MAX_DIVISOR;
	qdata.arg1 = clk_id;
	qdata.arg2 = type;
	ret = zynqmp_pm_query_data(qdata, ret_payload);
	/*
	 * To maintain backward compatibility return maximum possible value
	 * (0xFFFF) if query for max divisor is not successful.
	 */
	if (ret)
		return U16_MAX;

	return ret_payload[1];
}

static inline unsigned long zynqmp_clk_map_divider_ccf_flags(
					       const u32 zynqmp_type_flag)
{
	unsigned long ccf_flag = 0;

	if (zynqmp_type_flag & ZYNQMP_CLK_DIVIDER_ONE_BASED)
		ccf_flag |= CLK_DIVIDER_ONE_BASED;
	if (zynqmp_type_flag & ZYNQMP_CLK_DIVIDER_POWER_OF_TWO)
		ccf_flag |= CLK_DIVIDER_POWER_OF_TWO;
	if (zynqmp_type_flag & ZYNQMP_CLK_DIVIDER_ALLOW_ZERO)
		ccf_flag |= CLK_DIVIDER_ALLOW_ZERO;
	if (zynqmp_type_flag & ZYNQMP_CLK_DIVIDER_HIWORD_MASK)
		ccf_flag |= CLK_DIVIDER_HIWORD_MASK;
	if (zynqmp_type_flag & ZYNQMP_CLK_DIVIDER_ROUND_CLOSEST)
		ccf_flag |= CLK_DIVIDER_ROUND_CLOSEST;
	if (zynqmp_type_flag & ZYNQMP_CLK_DIVIDER_READ_ONLY)
		ccf_flag |= CLK_DIVIDER_READ_ONLY;
	if (zynqmp_type_flag & ZYNQMP_CLK_DIVIDER_MAX_AT_ZERO)
		ccf_flag |= CLK_DIVIDER_MAX_AT_ZERO;

	return ccf_flag;
}

/**
 * zynqmp_clk_register_divider() - Register a divider clock
 * @name:		Name of this clock
 * @clk_id:		Id of clock
 * @parents:		Name of this clock's parents
 * @num_parents:	Number of parents
 * @nodes:		Clock topology node
 *
 * Return: clock hardware of the registered clock divider, or an ERR_PTR
 *	   on failure
 */
struct clk_hw *zynqmp_clk_register_divider(const char *name,
					   u32 clk_id,
					   const char * const *parents,
					   u8 num_parents,
					   const struct clock_topology *nodes)
{
	struct zynqmp_clk_divider *div;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	/* allocate the divider */
	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (nodes->type_flag & CLK_DIVIDER_READ_ONLY)
		init.ops = &zynqmp_clk_divider_ro_ops;
	else
		init.ops = &zynqmp_clk_divider_ops;

	init.flags = zynqmp_clk_map_common_ccf_flags(nodes->flag);

	init.parent_names = parents;
	init.num_parents = 1;

	/* struct clk_divider assignments */
	div->is_frac = !!((nodes->flag & CLK_FRAC) |
			  (nodes->custom_type_flag & CUSTOM_FLAG_CLK_FRAC));
	div->flags = zynqmp_clk_map_divider_ccf_flags(nodes->type_flag);
	div->hw.init = &init;
	div->clk_id = clk_id;
	div->div_type = nodes->type;

	/*
	 * The maximum divider limit is needed during the rate computation
	 * so that the best possible rate can be achieved.
	 */
	div->max_div = zynqmp_clk_get_max_divisor(clk_id, nodes->type);

	hw = &div->hw;
	ret = clk_hw_register(NULL, hw);
	if (ret) {
		kfree(div);
		hw = ERR_PTR(ret);
	}

	return hw;
}
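
/*
 * Illustrative (hypothetical) call from the clock topology setup code,
 * assuming a divider node named "video_ref_div1" has already been parsed;
 * the variables clk_id, parents, num_parents and nodes[i] are placeholders:
 *
 *	hw = zynqmp_clk_register_divider("video_ref_div1", clk_id,
 *					 parents, num_parents, &nodes[i]);
 *	if (IS_ERR(hw))
 *		pr_warn("failed to register divider %s\n", "video_ref_div1");
 */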
375