xref: /linux/drivers/clk/mmp/clk-mix.c (revision 93d90ad708b8da6efc0e487b66111aa9db7f70c7)
/*
 * mmp mix(div and mux) clock operation source file
 *
 * Copyright (C) 2014 Marvell
 * Chao Xie <chao.xie@marvell.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>

#include "clk.h"

/*
 * The mix clock is a clock that combines a mux and a divider.
 * Because the div field and the mux field need to be set at the
 * same time, it cannot be split into two separate clock types.
 */

#define to_clk_mix(hw)	container_of(hw, struct mmp_clk_mix, hw)

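/*
 * Largest divider the div field can select, taking the divider flags
 * and the optional div table into account.
 */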
static unsigned int _get_maxdiv(struct mmp_clk_mix *mix)
{
	unsigned int div_mask = (1 << mix->reg_info.width_div) - 1;
	unsigned int maxdiv = 0;
	struct clk_div_table *clkt;

	if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
		return div_mask;
	if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
		return 1 << div_mask;
	if (mix->div_table) {
		for (clkt = mix->div_table; clkt->div; clkt++)
			if (clkt->div > maxdiv)
				maxdiv = clkt->div;
		return maxdiv;
	}
	return div_mask + 1;
}

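/* Translate a raw div field value into the divider it selects. */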
static unsigned int _get_div(struct mmp_clk_mix *mix, unsigned int val)
{
	struct clk_div_table *clkt;

	if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
		return val;
	if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
		return 1 << val;
	if (mix->div_table) {
		for (clkt = mix->div_table; clkt->div; clkt++)
			if (clkt->val == val)
				return clkt->div;
		if (clkt->div == 0)
			return 0;
	}
	return val + 1;
}

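/* Translate a raw mux field value into the index of the selected parent. */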
static unsigned int _get_mux(struct mmp_clk_mix *mix, unsigned int val)
{
	int num_parents = __clk_get_num_parents(mix->hw.clk);
	int i;

	if (mix->mux_flags & CLK_MUX_INDEX_BIT)
		return ffs(val) - 1;
	if (mix->mux_flags & CLK_MUX_INDEX_ONE)
		return val - 1;
	if (mix->mux_table) {
		for (i = 0; i < num_parents; i++)
			if (mix->mux_table[i] == val)
				return i;
		if (i == num_parents)
			return 0;
	}

	return val;
}
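
/* Translate a divider into the raw value to program into the div field. */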
static unsigned int _get_div_val(struct mmp_clk_mix *mix, unsigned int div)
{
	struct clk_div_table *clkt;

	if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
		return div;
	if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
		return __ffs(div);
	if (mix->div_table) {
		for (clkt = mix->div_table; clkt->div; clkt++)
			if (clkt->div == div)
				return clkt->val;
		if (clkt->div == 0)
			return 0;
	}

	return div - 1;
}

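/* Translate a parent index into the raw value to program into the mux field. */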
static unsigned int _get_mux_val(struct mmp_clk_mix *mix, unsigned int mux)
{
	if (mix->mux_table)
		return mix->mux_table[mux];

	return mux;
}

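/*
 * Mark each table entry as valid only if the requested rate divides its
 * parent's rate evenly, and cache the resulting divisor.
 */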
static void _filter_clk_table(struct mmp_clk_mix *mix,
				struct mmp_clk_mix_clk_table *table,
				unsigned int table_size)
{
	int i;
	struct mmp_clk_mix_clk_table *item;
	struct clk *parent, *clk;
	unsigned long parent_rate;

	clk = mix->hw.clk;

	for (i = 0; i < table_size; i++) {
		item = &table[i];
		parent = clk_get_parent_by_index(clk, item->parent_index);
		parent_rate = __clk_get_rate(parent);
		if (parent_rate % item->rate) {
			item->valid = 0;
		} else {
			item->divisor = parent_rate / item->rate;
			item->valid = 1;
		}
	}
}

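/*
 * Program the mux and/or div fields.  The register layout depends on the
 * mix type: V1 writes the control register directly, V2 additionally sets
 * a frequency-change bit and polls until it clears, and V3 keeps the
 * mux/div fields in a separate select register while the frequency-change
 * bit lives in the control register.
 */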
static int _set_rate(struct mmp_clk_mix *mix, u32 mux_val, u32 div_val,
			unsigned int change_mux, unsigned int change_div)
{
	struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
	u8 width, shift;
	u32 mux_div, fc_req;
	int ret, timeout = 50;
	unsigned long flags = 0;

	if (!change_mux && !change_div)
		return -EINVAL;

	if (mix->lock)
		spin_lock_irqsave(mix->lock, flags);

	if (mix->type == MMP_CLK_MIX_TYPE_V1
		|| mix->type == MMP_CLK_MIX_TYPE_V2)
		mux_div = readl(ri->reg_clk_ctrl);
	else
		mux_div = readl(ri->reg_clk_sel);

	if (change_div) {
		width = ri->width_div;
		shift = ri->shift_div;
		mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
		mux_div |= MMP_CLK_BITS_SET_VAL(div_val, width, shift);
	}

	if (change_mux) {
		width = ri->width_mux;
		shift = ri->shift_mux;
		mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
		mux_div |= MMP_CLK_BITS_SET_VAL(mux_val, width, shift);
	}

	if (mix->type == MMP_CLK_MIX_TYPE_V1) {
		writel(mux_div, ri->reg_clk_ctrl);
	} else if (mix->type == MMP_CLK_MIX_TYPE_V2) {
		mux_div |= (1 << ri->bit_fc);
		writel(mux_div, ri->reg_clk_ctrl);

		do {
			fc_req = readl(ri->reg_clk_ctrl);
			timeout--;
			if (!(fc_req & (1 << ri->bit_fc)))
				break;
		} while (timeout);

		if (timeout == 0) {
			pr_err("%s:%s cannot do frequency change\n",
				__func__, __clk_get_name(mix->hw.clk));
			ret = -EBUSY;
			goto error;
		}
	} else {
		fc_req = readl(ri->reg_clk_ctrl);
		fc_req |= 1 << ri->bit_fc;
		writel(fc_req, ri->reg_clk_ctrl);
		writel(mux_div, ri->reg_clk_sel);
		fc_req &= ~(1 << ri->bit_fc);
	}

	ret = 0;
error:
	if (mix->lock)
		spin_unlock_irqrestore(mix->lock, flags);

	return ret;
}

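/*
 * Choose the parent/divisor combination whose output rate is closest to
 * the requested rate, either from the pre-filtered rate table or by
 * scanning every parent and every divider value.
 */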
static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long *best_parent_rate,
					struct clk_hw **best_parent_clk)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_clk_table *item;
	struct clk *parent, *parent_best, *mix_clk;
	unsigned long parent_rate, mix_rate, mix_rate_best, parent_rate_best;
	unsigned long gap, gap_best;
	u32 div_val_max;
	unsigned int div;
	int i, j;

	mix_clk = hw->clk;

	parent = NULL;
	mix_rate_best = 0;
	parent_rate_best = 0;
	gap_best = rate;
	parent_best = NULL;

	if (mix->table) {
		for (i = 0; i < mix->table_size; i++) {
			item = &mix->table[i];
			if (item->valid == 0)
				continue;
			parent = clk_get_parent_by_index(mix_clk,
							item->parent_index);
			parent_rate = __clk_get_rate(parent);
			mix_rate = parent_rate / item->divisor;
			gap = abs(mix_rate - rate);
			if (parent_best == NULL || gap < gap_best) {
				parent_best = parent;
				parent_rate_best = parent_rate;
				mix_rate_best = mix_rate;
				gap_best = gap;
				if (gap_best == 0)
					goto found;
			}
		}
	} else {
		for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
			parent = clk_get_parent_by_index(mix_clk, i);
			parent_rate = __clk_get_rate(parent);
			div_val_max = _get_maxdiv(mix);
			for (j = 0; j < div_val_max; j++) {
				div = _get_div(mix, j);
				mix_rate = parent_rate / div;
				gap = abs(mix_rate - rate);
				if (parent_best == NULL || gap < gap_best) {
					parent_best = parent;
					parent_rate_best = parent_rate;
					mix_rate_best = mix_rate;
					gap_best = gap;
					if (gap_best == 0)
						goto found;
				}
			}
		}
	}

found:
	*best_parent_rate = parent_rate_best;
	*best_parent_clk = __clk_get_hw(parent_best);

	return mix_rate_best;
}

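/* Derive the divisor from the chosen parent rate and program both fields. */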
static int mmp_clk_mix_set_rate_and_parent(struct clk_hw *hw,
						unsigned long rate,
						unsigned long parent_rate,
						u8 index)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	unsigned int div;
	u32 div_val, mux_val;

	div = parent_rate / rate;
	div_val = _get_div_val(mix, div);
	mux_val = _get_mux_val(mix, index);

	return _set_rate(mix, mux_val, div_val, 1, 1);
}

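/* Read back the mux field and report which parent is currently selected. */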
static u8 mmp_clk_mix_get_parent(struct clk_hw *hw)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
	unsigned long flags = 0;
	u32 mux_div = 0;
	u8 width, shift;
	u32 mux_val;

	if (mix->lock)
		spin_lock_irqsave(mix->lock, flags);

	if (mix->type == MMP_CLK_MIX_TYPE_V1
		|| mix->type == MMP_CLK_MIX_TYPE_V2)
		mux_div = readl(ri->reg_clk_ctrl);
	else
		mux_div = readl(ri->reg_clk_sel);

	if (mix->lock)
		spin_unlock_irqrestore(mix->lock, flags);

	width = mix->reg_info.width_mux;
	shift = mix->reg_info.shift_mux;

	mux_val = MMP_CLK_BITS_GET_VAL(mux_div, width, shift);

	return _get_mux(mix, mux_val);
}

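/* Read back the div field and compute the output rate from the parent rate. */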
static unsigned long mmp_clk_mix_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
	unsigned long flags = 0;
	u32 mux_div = 0;
	u8 width, shift;
	unsigned int div;

	if (mix->lock)
		spin_lock_irqsave(mix->lock, flags);

	if (mix->type == MMP_CLK_MIX_TYPE_V1
		|| mix->type == MMP_CLK_MIX_TYPE_V2)
		mux_div = readl(ri->reg_clk_ctrl);
	else
		mux_div = readl(ri->reg_clk_sel);

	if (mix->lock)
		spin_unlock_irqrestore(mix->lock, flags);

	width = mix->reg_info.width_div;
	shift = mix->reg_info.shift_div;

	div = _get_div(mix, MMP_CLK_BITS_GET_VAL(mux_div, width, shift));

	return parent_rate / div;
}

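/*
 * Switch to the parent at @index.  With a rate table, the divisor comes
 * from the first valid entry for that parent; otherwise only the mux
 * field is changed.
 */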
static int mmp_clk_set_parent(struct clk_hw *hw, u8 index)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_clk_table *item;
	int i;
	u32 div_val, mux_val;

	if (mix->table) {
		for (i = 0; i < mix->table_size; i++) {
			item = &mix->table[i];
			if (item->valid == 0)
				continue;
			if (item->parent_index == index)
				break;
		}
		if (i < mix->table_size) {
			div_val = _get_div_val(mix, item->divisor);
			mux_val = _get_mux_val(mix, item->parent_index);
		} else
			return -EINVAL;
	} else {
		mux_val = _get_mux_val(mix, index);
		div_val = 0;
	}

	return _set_rate(mix, mux_val, div_val, 1, div_val ? 1 : 0);
}

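/*
 * Program the parent and divisor that yield @rate from best_parent_rate,
 * via the rate table when one is present.
 */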
static int mmp_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long best_parent_rate)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_clk_table *item;
	unsigned long parent_rate;
	unsigned int best_divisor;
	struct clk *mix_clk, *parent;
	int i;

	best_divisor = best_parent_rate / rate;

	mix_clk = hw->clk;
	if (mix->table) {
		for (i = 0; i < mix->table_size; i++) {
			item = &mix->table[i];
			if (item->valid == 0)
				continue;
			parent = clk_get_parent_by_index(mix_clk,
							item->parent_index);
			parent_rate = __clk_get_rate(parent);
			if (parent_rate == best_parent_rate
				&& item->divisor == best_divisor)
				break;
		}
		if (i < mix->table_size)
			return _set_rate(mix,
					_get_mux_val(mix, item->parent_index),
					_get_div_val(mix, item->divisor),
					1, 1);
		else
			return -EINVAL;
	} else {
		for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
			parent = clk_get_parent_by_index(mix_clk, i);
			parent_rate = __clk_get_rate(parent);
			if (parent_rate == best_parent_rate)
				break;
		}
		if (i < __clk_get_num_parents(mix_clk))
			return _set_rate(mix, _get_mux_val(mix, i),
					_get_div_val(mix, best_divisor), 1, 1);
		else
			return -EINVAL;
	}
}

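/* Filter the optional rate table when the clock is initialized. */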
static void mmp_clk_mix_init(struct clk_hw *hw)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);

	if (mix->table)
		_filter_clk_table(mix, mix->table, mix->table_size);
}

const struct clk_ops mmp_clk_mix_ops = {
	.determine_rate = mmp_clk_mix_determine_rate,
	.set_rate_and_parent = mmp_clk_mix_set_rate_and_parent,
	.set_rate = mmp_clk_set_rate,
	.set_parent = mmp_clk_set_parent,
	.get_parent = mmp_clk_mix_get_parent,
	.recalc_rate = mmp_clk_mix_recalc_rate,
	.init = mmp_clk_mix_init,
};

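/*
 * Register a mix clock.  The optional rate and mux tables from @config are
 * copied, and the mix type is derived from the register description: an
 * out-of-range frequency-change bit (bit_fc >= 32) selects V1, a separate
 * select register selects V3, otherwise the clock is V2.
 */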
struct clk *mmp_clk_register_mix(struct device *dev,
					const char *name,
					const char **parent_names,
					u8 num_parents,
					unsigned long flags,
					struct mmp_clk_mix_config *config,
					spinlock_t *lock)
{
	struct mmp_clk_mix *mix;
	struct clk *clk;
	struct clk_init_data init;
	size_t table_bytes;

	mix = kzalloc(sizeof(*mix), GFP_KERNEL);
	if (!mix) {
		pr_err("%s:%s: could not allocate mmp mix clk\n",
			__func__, name);
		return ERR_PTR(-ENOMEM);
	}

	init.name = name;
	init.flags = flags | CLK_GET_RATE_NOCACHE;
	init.parent_names = parent_names;
	init.num_parents = num_parents;
	init.ops = &mmp_clk_mix_ops;

	memcpy(&mix->reg_info, &config->reg_info, sizeof(config->reg_info));
	if (config->table) {
		table_bytes = sizeof(*config->table) * config->table_size;
		mix->table = kzalloc(table_bytes, GFP_KERNEL);
		if (!mix->table) {
			pr_err("%s:%s: could not allocate mmp mix table\n",
				__func__, name);
			kfree(mix);
			return ERR_PTR(-ENOMEM);
		}
		memcpy(mix->table, config->table, table_bytes);
		mix->table_size = config->table_size;
	}

	if (config->mux_table) {
		table_bytes = sizeof(u32) * num_parents;
		mix->mux_table = kzalloc(table_bytes, GFP_KERNEL);
		if (!mix->mux_table) {
			pr_err("%s:%s: could not allocate mmp mix mux-table\n",
				__func__, name);
			kfree(mix->table);
			kfree(mix);
			return ERR_PTR(-ENOMEM);
		}
		memcpy(mix->mux_table, config->mux_table, table_bytes);
	}

	mix->div_flags = config->div_flags;
	mix->mux_flags = config->mux_flags;
	mix->lock = lock;
	mix->hw.init = &init;

	if (config->reg_info.bit_fc >= 32)
		mix->type = MMP_CLK_MIX_TYPE_V1;
	else if (config->reg_info.reg_clk_sel)
		mix->type = MMP_CLK_MIX_TYPE_V3;
	else
		mix->type = MMP_CLK_MIX_TYPE_V2;
	clk = clk_register(dev, &mix->hw);

	if (IS_ERR(clk)) {
		kfree(mix->mux_table);
		kfree(mix->table);
		kfree(mix);
	}

	return clk;
}