xref: /linux/drivers/sh/clk/cpg.c (revision 1111cc1e8080b5ff46f5b945acb2f99d6176b2d1)
1 /*
2  * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
3  *
4  *  Copyright (C) 2010  Magnus Damm
5  *  Copyright (C) 2010 - 2012  Paul Mundt
6  *
7  * This file is subject to the terms and conditions of the GNU General Public
8  * License.  See the file "COPYING" in the main directory of this archive
9  * for more details.
10  */
11 #include <linux/clk.h>
12 #include <linux/compiler.h>
13 #include <linux/slab.h>
14 #include <linux/io.h>
15 #include <linux/sh_clk.h>
16 
17 static unsigned int sh_clk_read(struct clk *clk)
18 {
19 	if (clk->flags & CLK_ENABLE_REG_8BIT)
20 		return ioread8(clk->mapped_reg);
21 	else if (clk->flags & CLK_ENABLE_REG_16BIT)
22 		return ioread16(clk->mapped_reg);
23 
24 	return ioread32(clk->mapped_reg);
25 }
26 
27 static void sh_clk_write(int value, struct clk *clk)
28 {
29 	if (clk->flags & CLK_ENABLE_REG_8BIT)
30 		iowrite8(value, clk->mapped_reg);
31 	else if (clk->flags & CLK_ENABLE_REG_16BIT)
32 		iowrite16(value, clk->mapped_reg);
33 	else
34 		iowrite32(value, clk->mapped_reg);
35 }
36 
37 static int sh_clk_mstp_enable(struct clk *clk)
38 {
39 	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
40 	return 0;
41 }
42 
43 static void sh_clk_mstp_disable(struct clk *clk)
44 {
45 	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
46 }
47 
/* MSTP gate clocks: enable/disable toggle the stop bit, rate follows parent. */
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};
53 
54 int __init sh_clk_mstp_register(struct clk *clks, int nr)
55 {
56 	struct clk *clkp;
57 	int ret = 0;
58 	int k;
59 
60 	for (k = 0; !ret && (k < nr); k++) {
61 		clkp = clks + k;
62 		clkp->ops = &sh_clk_mstp_clk_ops;
63 		ret |= clk_register(clkp);
64 	}
65 
66 	return ret;
67 }
68 
/* Shared ->round_rate for divider clocks: snap to the nearest table rate. */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
73 
74 /*
75  * Div/mult table lookup helpers
76  */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	/*
	 * ->priv carries the divider table.  NOTE(review): div4 clocks
	 * store a struct clk_div4_table here (see sh_clk_div4_register_ops)
	 * — this cast relies on the two table layouts matching; confirm
	 * against <linux/sh_clk.h>.
	 */
	return clk->priv;
}
81 
/* Fetch the div/mult table embedded in the clock's divider table. */
static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
	return clk_to_div_table(clk)->div_mult_table;
}
86 
87 /*
88  * div6 support
89  */
/* div6 registers encode divisor N-1 in the low bits, so index i -> divisor i+1 */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

/* Shared table for all div6 clocks (no kick callback needed) */
static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table	= &div6_div_mult_table,
};
105 
106 static unsigned long sh_clk_div6_recalc(struct clk *clk)
107 {
108 	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
109 	unsigned int idx;
110 
111 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
112 			     table, NULL);
113 
114 	idx = sh_clk_read(clk) & clk->div_mask;
115 
116 	return clk->freq_table[idx].frequency;
117 }
118 
119 static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
120 {
121 	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
122 	u32 value;
123 	int ret, i;
124 
125 	if (!clk->parent_table || !clk->parent_num)
126 		return -EINVAL;
127 
128 	/* Search the parent */
129 	for (i = 0; i < clk->parent_num; i++)
130 		if (clk->parent_table[i] == parent)
131 			break;
132 
133 	if (i == clk->parent_num)
134 		return -ENODEV;
135 
136 	ret = clk_reparent(clk, parent);
137 	if (ret < 0)
138 		return ret;
139 
140 	value = sh_clk_read(clk) &
141 		~(((1 << clk->src_width) - 1) << clk->src_shift);
142 
143 	sh_clk_write(value | (i << clk->src_shift), clk);
144 
145 	/* Rebuild the frequency table */
146 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
147 			     table, NULL);
148 
149 	return 0;
150 }
151 
152 static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
153 {
154 	unsigned long value;
155 	int idx;
156 
157 	idx = clk_rate_table_find(clk, clk->freq_table, rate);
158 	if (idx < 0)
159 		return idx;
160 
161 	value = sh_clk_read(clk);
162 	value &= ~clk->div_mask;
163 	value |= idx;
164 	sh_clk_write(value, clk);
165 	return 0;
166 }
167 
168 static int sh_clk_div6_enable(struct clk *clk)
169 {
170 	unsigned long value;
171 	int ret;
172 
173 	ret = sh_clk_div6_set_rate(clk, clk->rate);
174 	if (ret == 0) {
175 		value = sh_clk_read(clk);
176 		value &= ~0x100; /* clear stop bit to enable clock */
177 		sh_clk_write(value, clk);
178 	}
179 	return ret;
180 }
181 
182 static void sh_clk_div6_disable(struct clk *clk)
183 {
184 	unsigned long value;
185 
186 	value = sh_clk_read(clk);
187 	value |= 0x100; /* stop clock */
188 	value |= clk->div_mask; /* VDIV bits must be non-zero, overwrite divider */
189 	sh_clk_write(value, clk);
190 }
191 
/* div6 clocks with a fixed parent */
static struct sh_clk_ops sh_clk_div6_clk_ops = {
	.recalc		= sh_clk_div6_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
};

/* div6 clocks with a runtime-selectable parent */
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div6_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
208 
209 static int __init sh_clk_init_parent(struct clk *clk)
210 {
211 	u32 val;
212 
213 	if (clk->parent)
214 		return 0;
215 
216 	if (!clk->parent_table || !clk->parent_num)
217 		return 0;
218 
219 	if (!clk->src_width) {
220 		pr_err("sh_clk_init_parent: cannot select parent clock\n");
221 		return -EINVAL;
222 	}
223 
224 	val  = (sh_clk_read(clk) >> clk->src_shift);
225 	val &= (1 << clk->src_width) - 1;
226 
227 	if (val >= clk->parent_num) {
228 		pr_err("sh_clk_init_parent: parent table size failed\n");
229 		return -EINVAL;
230 	}
231 
232 	clk_reparent(clk, clk->parent_table[val]);
233 	if (!clk->parent) {
234 		pr_err("sh_clk_init_parent: unable to set parent");
235 		return -EINVAL;
236 	}
237 
238 	return 0;
239 }
240 
241 static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
242 					   struct sh_clk_ops *ops)
243 {
244 	struct clk *clkp;
245 	void *freq_table;
246 	struct clk_div_table *table = &sh_clk_div6_table;
247 	int nr_divs = table->div_mult_table->nr_divisors;
248 	int freq_table_size = sizeof(struct cpufreq_frequency_table);
249 	int ret = 0;
250 	int k;
251 
252 	freq_table_size *= (nr_divs + 1);
253 	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
254 	if (!freq_table) {
255 		pr_err("sh_clk_div6_register: unable to alloc memory\n");
256 		return -ENOMEM;
257 	}
258 
259 	for (k = 0; !ret && (k < nr); k++) {
260 		clkp = clks + k;
261 
262 		clkp->ops = ops;
263 		clkp->priv = table;
264 		clkp->freq_table = freq_table + (k * freq_table_size);
265 		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
266 		ret = clk_register(clkp);
267 		if (ret < 0)
268 			break;
269 
270 		ret = sh_clk_init_parent(clkp);
271 	}
272 
273 	return ret;
274 }
275 
/* Register div6 clocks with a fixed parent. */
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
}
280 
/* Register div6 clocks that support runtime parent selection. */
int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr,
					&sh_clk_div6_reparent_clk_ops);
}
286 
287 /*
288  * div4 support
289  */
290 static unsigned long sh_clk_div4_recalc(struct clk *clk)
291 {
292 	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
293 	unsigned int idx;
294 
295 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
296 			     table, &clk->arch_flags);
297 
298 	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;
299 
300 	return clk->freq_table[idx].frequency;
301 }
302 
303 static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
304 {
305 	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
306 	u32 value;
307 	int ret;
308 
309 	/* we really need a better way to determine parent index, but for
310 	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
311 	 * no CLK_ENABLE_ON_INIT means external clock...
312 	 */
313 
314 	if (parent->flags & CLK_ENABLE_ON_INIT)
315 		value = sh_clk_read(clk) & ~(1 << 7);
316 	else
317 		value = sh_clk_read(clk) | (1 << 7);
318 
319 	ret = clk_reparent(clk, parent);
320 	if (ret < 0)
321 		return ret;
322 
323 	sh_clk_write(value, clk);
324 
325 	/* Rebiuld the frequency table */
326 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
327 			     table, &clk->arch_flags);
328 
329 	return 0;
330 }
331 
332 static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
333 {
334 	struct clk_div_table *dt = clk_to_div_table(clk);
335 	unsigned long value;
336 	int idx = clk_rate_table_find(clk, clk->freq_table, rate);
337 	if (idx < 0)
338 		return idx;
339 
340 	value = sh_clk_read(clk);
341 	value &= ~(clk->div_mask << clk->enable_bit);
342 	value |= (idx << clk->enable_bit);
343 	sh_clk_write(value, clk);
344 
345 	/* XXX: Should use a post-change notifier */
346 	if (dt->kick)
347 		dt->kick(clk);
348 
349 	return 0;
350 }
351 
/* ->enable for div4 clocks: clear the stop bit (bit 8). */
static int sh_clk_div4_enable(struct clk *clk)
{
	unsigned int value = sh_clk_read(clk);

	sh_clk_write(value & ~(1 << 8), clk);
	return 0;
}
357 
/* ->disable for div4 clocks: set the stop bit (bit 8). */
static void sh_clk_div4_disable(struct clk *clk)
{
	unsigned int value = sh_clk_read(clk);

	sh_clk_write(value | (1 << 8), clk);
}
362 
/* div4 clocks that are always running (no gate) */
static struct sh_clk_ops sh_clk_div4_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};

/* div4 clocks that can additionally be gated via the stop bit */
static struct sh_clk_ops sh_clk_div4_enable_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
};

/* div4 clocks with gating plus internal/external parent selection */
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
385 
386 static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
387 			struct clk_div4_table *table, struct sh_clk_ops *ops)
388 {
389 	struct clk *clkp;
390 	void *freq_table;
391 	int nr_divs = table->div_mult_table->nr_divisors;
392 	int freq_table_size = sizeof(struct cpufreq_frequency_table);
393 	int ret = 0;
394 	int k;
395 
396 	freq_table_size *= (nr_divs + 1);
397 	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
398 	if (!freq_table) {
399 		pr_err("sh_clk_div4_register: unable to alloc memory\n");
400 		return -ENOMEM;
401 	}
402 
403 	for (k = 0; !ret && (k < nr); k++) {
404 		clkp = clks + k;
405 
406 		clkp->ops = ops;
407 		clkp->priv = table;
408 
409 		clkp->freq_table = freq_table + (k * freq_table_size);
410 		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
411 
412 		ret = clk_register(clkp);
413 	}
414 
415 	return ret;
416 }
417 
/* Register always-on div4 clocks (no gating, no reparenting). */
int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
}
423 
/* Register div4 clocks that support gating via the stop bit. */
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_enable_clk_ops);
}
430 
/* Register div4 clocks that support gating and parent selection. */
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_reparent_clk_ops);
}
437