/* xref: /linux/drivers/sh/clk/cpg.c (revision 3a90a72aca0a98125f0c7350ffb7cc63665f8047) */
/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 *  Copyright (C) 2010  Magnus Damm
 *  Copyright (C) 2010 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>

#define CPG_CKSTP_BIT	BIT(8)

sh_clk_read(struct clk * clk)19 static unsigned int sh_clk_read(struct clk *clk)
20 {
21 	if (clk->flags & CLK_ENABLE_REG_8BIT)
22 		return ioread8(clk->mapped_reg);
23 	else if (clk->flags & CLK_ENABLE_REG_16BIT)
24 		return ioread16(clk->mapped_reg);
25 
26 	return ioread32(clk->mapped_reg);
27 }
28 
/*
 * Read the clock's status register with the same access width as the
 * enable register.  status_reg holds a physical address; translate it
 * into the already-mapped window by applying its offset from enable_reg
 * to mapped_reg (assumes both registers live in the same mapping --
 * NOTE(review): verify against SH_CLK_* declarations).
 */
static unsigned int sh_clk_read_status(struct clk *clk)
{
	void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
		(phys_addr_t)clk->enable_reg + clk->mapped_reg;

	if (clk->flags & CLK_ENABLE_REG_8BIT)
		return ioread8(mapped_status);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		return ioread16(mapped_status);

	return ioread32(mapped_status);
}
41 
sh_clk_write(int value,struct clk * clk)42 static void sh_clk_write(int value, struct clk *clk)
43 {
44 	if (clk->flags & CLK_ENABLE_REG_8BIT)
45 		iowrite8(value, clk->mapped_reg);
46 	else if (clk->flags & CLK_ENABLE_REG_16BIT)
47 		iowrite16(value, clk->mapped_reg);
48 	else
49 		iowrite32(value, clk->mapped_reg);
50 }
51 
sh_clk_mstp_enable(struct clk * clk)52 static int sh_clk_mstp_enable(struct clk *clk)
53 {
54 	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
55 	if (clk->status_reg) {
56 		int i;
57 
58 		for (i = 1000;
59 		     (sh_clk_read_status(clk) & (1 << clk->enable_bit)) && i;
60 		     i--)
61 			cpu_relax();
62 		if (!i) {
63 			pr_err("cpg: failed to enable %p[%d]\n",
64 			       clk->enable_reg, clk->enable_bit);
65 			return -ETIMEDOUT;
66 		}
67 	}
68 	return 0;
69 }
70 
sh_clk_mstp_disable(struct clk * clk)71 static void sh_clk_mstp_disable(struct clk *clk)
72 {
73 	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
74 }
75 
/* MSTP gate clocks: gate control only, rate always follows the parent. */
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};
81 
sh_clk_mstp_register(struct clk * clks,int nr)82 int __init sh_clk_mstp_register(struct clk *clks, int nr)
83 {
84 	struct clk *clkp;
85 	int ret = 0;
86 	int k;
87 
88 	for (k = 0; !ret && (k < nr); k++) {
89 		clkp = clks + k;
90 		clkp->ops = &sh_clk_mstp_clk_ops;
91 		ret |= clk_register(clkp);
92 	}
93 
94 	return ret;
95 }
96 
97 /*
98  * Div/mult table lookup helpers
99  */
clk_to_div_table(struct clk * clk)100 static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
101 {
102 	return clk->priv;
103 }
104 
clk_to_div_mult_table(struct clk * clk)105 static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
106 {
107 	return clk_to_div_table(clk)->div_mult_table;
108 }
109 
110 /*
111  * Common div ops
112  */
sh_clk_div_round_rate(struct clk * clk,unsigned long rate)113 static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
114 {
115 	return clk_rate_table_round(clk, clk->freq_table, rate);
116 }
117 
sh_clk_div_recalc(struct clk * clk)118 static unsigned long sh_clk_div_recalc(struct clk *clk)
119 {
120 	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
121 	unsigned int idx;
122 
123 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
124 			     table, clk->arch_flags ? &clk->arch_flags : NULL);
125 
126 	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;
127 
128 	return clk->freq_table[idx].frequency;
129 }
130 
sh_clk_div_set_rate(struct clk * clk,unsigned long rate)131 static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
132 {
133 	struct clk_div_table *dt = clk_to_div_table(clk);
134 	unsigned long value;
135 	int idx;
136 
137 	idx = clk_rate_table_find(clk, clk->freq_table, rate);
138 	if (idx < 0)
139 		return idx;
140 
141 	value = sh_clk_read(clk);
142 	value &= ~(clk->div_mask << clk->enable_bit);
143 	value |= (idx << clk->enable_bit);
144 	sh_clk_write(value, clk);
145 
146 	/* XXX: Should use a post-change notifier */
147 	if (dt->kick)
148 		dt->kick(clk);
149 
150 	return 0;
151 }
152 
sh_clk_div_enable(struct clk * clk)153 static int sh_clk_div_enable(struct clk *clk)
154 {
155 	if (clk->div_mask == SH_CLK_DIV6_MSK) {
156 		int ret = sh_clk_div_set_rate(clk, clk->rate);
157 		if (ret < 0)
158 			return ret;
159 	}
160 
161 	sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
162 	return 0;
163 }
164 
sh_clk_div_disable(struct clk * clk)165 static void sh_clk_div_disable(struct clk *clk)
166 {
167 	unsigned int val;
168 
169 	val = sh_clk_read(clk);
170 	val |= CPG_CKSTP_BIT;
171 
172 	/*
173 	 * div6 clocks require the divisor field to be non-zero or the
174 	 * above CKSTP toggle silently fails. Ensure that the divisor
175 	 * array is reset to its initial state on disable.
176 	 */
177 	if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
178 		val |= clk->div_mask;
179 
180 	sh_clk_write(val, clk);
181 }
182 
/* Div clocks that are always running: rate control only, no gating. */
static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};
188 
/* Div clocks with a CKSTP gate: rate control plus enable/disable. */
static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};
196 
sh_clk_init_parent(struct clk * clk)197 static int __init sh_clk_init_parent(struct clk *clk)
198 {
199 	u32 val;
200 
201 	if (clk->parent)
202 		return 0;
203 
204 	if (!clk->parent_table || !clk->parent_num)
205 		return 0;
206 
207 	if (!clk->src_width) {
208 		pr_err("sh_clk_init_parent: cannot select parent clock\n");
209 		return -EINVAL;
210 	}
211 
212 	val  = (sh_clk_read(clk) >> clk->src_shift);
213 	val &= (1 << clk->src_width) - 1;
214 
215 	if (val >= clk->parent_num) {
216 		pr_err("sh_clk_init_parent: parent table size failed\n");
217 		return -EINVAL;
218 	}
219 
220 	clk_reparent(clk, clk->parent_table[val]);
221 	if (!clk->parent) {
222 		pr_err("sh_clk_init_parent: unable to set parent");
223 		return -EINVAL;
224 	}
225 
226 	return 0;
227 }
228 
/*
 * Common registration helper for div4/div6 clocks: allocate one block of
 * cpufreq frequency tables shared by all @nr clocks, wire up the ops and
 * table pointers, then register each clock and resolve its parent.
 *
 * NOTE(review): freq_table is deliberately not freed on a registration
 * failure -- clocks registered before the failure keep pointers into it,
 * so it must stay allocated for the lifetime of the system.
 */
static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
			struct clk_div_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	/* One table per clock, plus room for the CPUFREQ_TABLE_END sentinel. */
	freq_table_size *= (nr_divs + 1);
	freq_table = kcalloc(nr, freq_table_size, GFP_KERNEL);
	if (!freq_table) {
		pr_err("%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret == 0)
			ret = sh_clk_init_parent(clkp);
	}

	return ret;
}
262 
263 /*
264  * div6 support
265  */
/* div6 divisor encoding: register field value n selects a divisor of n + 1. */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
272 
/* div6 clocks are divide-only: no multipliers, divisors 1..64. */
static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
277 
/* Shared div table for all div6 clocks; no post-change kick needed. */
static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table	= &div6_div_mult_table,
};
281 
sh_clk_div6_set_parent(struct clk * clk,struct clk * parent)282 static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
283 {
284 	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
285 	u32 value;
286 	int ret, i;
287 
288 	if (!clk->parent_table || !clk->parent_num)
289 		return -EINVAL;
290 
291 	/* Search the parent */
292 	for (i = 0; i < clk->parent_num; i++)
293 		if (clk->parent_table[i] == parent)
294 			break;
295 
296 	if (i == clk->parent_num)
297 		return -ENODEV;
298 
299 	ret = clk_reparent(clk, parent);
300 	if (ret < 0)
301 		return ret;
302 
303 	value = sh_clk_read(clk) &
304 		~(((1 << clk->src_width) - 1) << clk->src_shift);
305 
306 	sh_clk_write(value | (i << clk->src_shift), clk);
307 
308 	/* Rebuild the frequency table */
309 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
310 			     table, NULL);
311 
312 	return 0;
313 }
314 
/* div6 clocks with selectable parent: full rate/gate/parent control. */
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
323 
sh_clk_div6_register(struct clk * clks,int nr)324 int __init sh_clk_div6_register(struct clk *clks, int nr)
325 {
326 	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
327 				       &sh_clk_div_enable_clk_ops);
328 }
329 
sh_clk_div6_reparent_register(struct clk * clks,int nr)330 int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
331 {
332 	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
333 				       &sh_clk_div6_reparent_clk_ops);
334 }
335 
336 /*
337  * div4 support
338  */
sh_clk_div4_set_parent(struct clk * clk,struct clk * parent)339 static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
340 {
341 	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
342 	u32 value;
343 	int ret;
344 
345 	/* we really need a better way to determine parent index, but for
346 	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
347 	 * no CLK_ENABLE_ON_INIT means external clock...
348 	 */
349 
350 	if (parent->flags & CLK_ENABLE_ON_INIT)
351 		value = sh_clk_read(clk) & ~(1 << 7);
352 	else
353 		value = sh_clk_read(clk) | (1 << 7);
354 
355 	ret = clk_reparent(clk, parent);
356 	if (ret < 0)
357 		return ret;
358 
359 	sh_clk_write(value, clk);
360 
361 	/* Rebiuld the frequency table */
362 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
363 			     table, &clk->arch_flags);
364 
365 	return 0;
366 }
367 
/* div4 clocks with selectable parent: full rate/gate/parent control. */
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
376 
sh_clk_div4_register(struct clk * clks,int nr,struct clk_div4_table * table)377 int __init sh_clk_div4_register(struct clk *clks, int nr,
378 				struct clk_div4_table *table)
379 {
380 	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
381 }
382 
sh_clk_div4_enable_register(struct clk * clks,int nr,struct clk_div4_table * table)383 int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
384 				struct clk_div4_table *table)
385 {
386 	return sh_clk_div_register_ops(clks, nr, table,
387 				       &sh_clk_div_enable_clk_ops);
388 }
389 
sh_clk_div4_reparent_register(struct clk * clks,int nr,struct clk_div4_table * table)390 int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
391 				struct clk_div4_table *table)
392 {
393 	return sh_clk_div_register_ops(clks, nr, table,
394 				       &sh_clk_div4_reparent_clk_ops);
395 }
396 
397 /* FSI-DIV */
fsidiv_recalc(struct clk * clk)398 static unsigned long fsidiv_recalc(struct clk *clk)
399 {
400 	u32 value;
401 
402 	value = __raw_readl(clk->mapping->base);
403 
404 	value >>= 16;
405 	if (value < 2)
406 		return clk->parent->rate;
407 
408 	return clk->parent->rate / value;
409 }
410 
/* Round @rate to what the 16-bit divider (1..0xffff) can produce. */
static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
	const unsigned int div_min = 1, div_max = 0xffff;

	return clk_rate_div_range_round(clk, div_min, div_max, rate);
}
415 
fsidiv_disable(struct clk * clk)416 static void fsidiv_disable(struct clk *clk)
417 {
418 	__raw_writel(0, clk->mapping->base);
419 }
420 
fsidiv_enable(struct clk * clk)421 static int fsidiv_enable(struct clk *clk)
422 {
423 	u32 value;
424 
425 	value  = __raw_readl(clk->mapping->base) >> 16;
426 	if (value < 2)
427 		return 0;
428 
429 	__raw_writel((value << 16) | 0x3, clk->mapping->base);
430 
431 	return 0;
432 }
433 
fsidiv_set_rate(struct clk * clk,unsigned long rate)434 static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
435 {
436 	int idx;
437 
438 	idx = (clk->parent->rate / rate) & 0xffff;
439 	if (idx < 2)
440 		__raw_writel(0, clk->mapping->base);
441 	else
442 		__raw_writel(idx << 16, clk->mapping->base);
443 
444 	return 0;
445 }
446 
/* FSI-DIV clocks: 16-bit divider accessed through clk->mapping->base. */
static struct sh_clk_ops fsidiv_clk_ops = {
	.recalc		= fsidiv_recalc,
	.round_rate	= fsidiv_round_rate,
	.set_rate	= fsidiv_set_rate,
	.enable		= fsidiv_enable,
	.disable	= fsidiv_disable,
};
454 
/*
 * Register FSI-DIV clocks.  Each clock's enable_reg (filled in by
 * SH_CLK_FSIDIV()) carries the physical address of its divider
 * register; turn it into a clk_mapping so the fsidiv ops can reach the
 * register via clk->mapping->base.
 *
 * NOTE(review): mappings allocated for earlier clocks are not freed if
 * a later allocation fails -- those clocks are already registered and
 * still reference them.  clk_register() errors are ignored here; verify
 * that is intentional for this init-time path.
 */
int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
{
	struct clk_mapping *map;
	int i;

	for (i = 0; i < nr; i++) {

		map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
		if (!map) {
			pr_err("%s: unable to alloc memory\n", __func__);
			return -ENOMEM;
		}

		/* clks[i].enable_reg came from SH_CLK_FSIDIV() */
		map->phys		= (phys_addr_t)clks[i].enable_reg;
		map->len		= 8;

		clks[i].enable_reg	= 0; /* remove .enable_reg */
		clks[i].ops		= &fsidiv_clk_ops;
		clks[i].mapping		= map;

		clk_register(&clks[i]);
	}

	return 0;
}
481