xref: /linux/drivers/sh/clk/cpg.c (revision 0fa22168e00106797f28b2655aaefd0d16a6e67b)
1 /*
2  * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
3  *
4  *  Copyright (C) 2010  Magnus Damm
5  *  Copyright (C) 2010 - 2012  Paul Mundt
6  *
7  * This file is subject to the terms and conditions of the GNU General Public
8  * License.  See the file "COPYING" in the main directory of this archive
9  * for more details.
10  */
11 #include <linux/clk.h>
12 #include <linux/compiler.h>
13 #include <linux/slab.h>
14 #include <linux/io.h>
15 #include <linux/sh_clk.h>
16 
17 static unsigned int sh_clk_read(struct clk *clk)
18 {
19 	if (clk->flags & CLK_ENABLE_REG_8BIT)
20 		return ioread8(clk->mapped_reg);
21 	else if (clk->flags & CLK_ENABLE_REG_16BIT)
22 		return ioread16(clk->mapped_reg);
23 
24 	return ioread32(clk->mapped_reg);
25 }
26 
27 static void sh_clk_write(int value, struct clk *clk)
28 {
29 	if (clk->flags & CLK_ENABLE_REG_8BIT)
30 		iowrite8(value, clk->mapped_reg);
31 	else if (clk->flags & CLK_ENABLE_REG_16BIT)
32 		iowrite16(value, clk->mapped_reg);
33 	else
34 		iowrite32(value, clk->mapped_reg);
35 }
36 
37 static int sh_clk_mstp_enable(struct clk *clk)
38 {
39 	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
40 	return 0;
41 }
42 
43 static void sh_clk_mstp_disable(struct clk *clk)
44 {
45 	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
46 }
47 
/*
 * MSTP gate clocks: enable/disable toggle the module-stop bit and
 * the rate simply follows the parent clock.
 */
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};
53 
54 int __init sh_clk_mstp_register(struct clk *clks, int nr)
55 {
56 	struct clk *clkp;
57 	int ret = 0;
58 	int k;
59 
60 	for (k = 0; !ret && (k < nr); k++) {
61 		clkp = clks + k;
62 		clkp->ops = &sh_clk_mstp_clk_ops;
63 		ret |= clk_register(clkp);
64 	}
65 
66 	return ret;
67 }
68 
69 /*
70  * Div/mult table lookup helpers
71  */
/* The clock's div table is stashed in its private data pointer. */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	return clk->priv;
}
76 
/* Shorthand for reaching the div/mult table through the div table. */
static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
	return clk_to_div_table(clk)->div_mult_table;
}
81 
82 /*
83  * Common div ops
84  */
/* Round @rate to the nearest achievable entry in the frequency table. */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
89 
90 static unsigned long sh_clk_div_recalc(struct clk *clk)
91 {
92 	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
93 	unsigned int idx;
94 
95 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
96 			     table, clk->arch_flags ? &clk->arch_flags : NULL);
97 
98 	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;
99 
100 	return clk->freq_table[idx].frequency;
101 }
102 
103 static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
104 {
105 	struct clk_div_table *dt = clk_to_div_table(clk);
106 	unsigned long value;
107 	int idx;
108 
109 	idx = clk_rate_table_find(clk, clk->freq_table, rate);
110 	if (idx < 0)
111 		return idx;
112 
113 	value = sh_clk_read(clk);
114 	value &= ~(clk->div_mask << clk->enable_bit);
115 	value |= (idx << clk->enable_bit);
116 	sh_clk_write(value, clk);
117 
118 	/* XXX: Should use a post-change notifier */
119 	if (dt->kick)
120 		dt->kick(clk);
121 
122 	return 0;
123 }
124 
125 /*
126  * div6 support
127  */
/* DIV6 divisors: register field value i selects divisor i + 1 (1..64). */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
134 
/* DIV6 clocks divide only; no multipliers. */
static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
139 
/* Shared div table for all DIV6 clocks (no kick callback needed). */
static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table	= &div6_div_mult_table,
};
143 
144 static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
145 {
146 	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
147 	u32 value;
148 	int ret, i;
149 
150 	if (!clk->parent_table || !clk->parent_num)
151 		return -EINVAL;
152 
153 	/* Search the parent */
154 	for (i = 0; i < clk->parent_num; i++)
155 		if (clk->parent_table[i] == parent)
156 			break;
157 
158 	if (i == clk->parent_num)
159 		return -ENODEV;
160 
161 	ret = clk_reparent(clk, parent);
162 	if (ret < 0)
163 		return ret;
164 
165 	value = sh_clk_read(clk) &
166 		~(((1 << clk->src_width) - 1) << clk->src_shift);
167 
168 	sh_clk_write(value | (i << clk->src_shift), clk);
169 
170 	/* Rebuild the frequency table */
171 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
172 			     table, NULL);
173 
174 	return 0;
175 }
176 
177 static int sh_clk_div6_enable(struct clk *clk)
178 {
179 	unsigned long value;
180 	int ret;
181 
182 	ret = sh_clk_div_set_rate(clk, clk->rate);
183 	if (ret == 0) {
184 		value = sh_clk_read(clk);
185 		value &= ~0x100; /* clear stop bit to enable clock */
186 		sh_clk_write(value, clk);
187 	}
188 	return ret;
189 }
190 
191 static void sh_clk_div6_disable(struct clk *clk)
192 {
193 	unsigned long value;
194 
195 	value = sh_clk_read(clk);
196 	value |= 0x100; /* stop clock */
197 	value |= clk->div_mask; /* VDIV bits must be non-zero, overwrite divider */
198 	sh_clk_write(value, clk);
199 }
200 
/* DIV6 clocks with a fixed parent. */
static struct sh_clk_ops sh_clk_div6_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
};
208 
/* DIV6 clocks that additionally support runtime parent selection. */
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
217 
218 static int __init sh_clk_init_parent(struct clk *clk)
219 {
220 	u32 val;
221 
222 	if (clk->parent)
223 		return 0;
224 
225 	if (!clk->parent_table || !clk->parent_num)
226 		return 0;
227 
228 	if (!clk->src_width) {
229 		pr_err("sh_clk_init_parent: cannot select parent clock\n");
230 		return -EINVAL;
231 	}
232 
233 	val  = (sh_clk_read(clk) >> clk->src_shift);
234 	val &= (1 << clk->src_width) - 1;
235 
236 	if (val >= clk->parent_num) {
237 		pr_err("sh_clk_init_parent: parent table size failed\n");
238 		return -EINVAL;
239 	}
240 
241 	clk_reparent(clk, clk->parent_table[val]);
242 	if (!clk->parent) {
243 		pr_err("sh_clk_init_parent: unable to set parent");
244 		return -EINVAL;
245 	}
246 
247 	return 0;
248 }
249 
250 static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
251 					   struct sh_clk_ops *ops)
252 {
253 	struct clk *clkp;
254 	void *freq_table;
255 	struct clk_div_table *table = &sh_clk_div6_table;
256 	int nr_divs = table->div_mult_table->nr_divisors;
257 	int freq_table_size = sizeof(struct cpufreq_frequency_table);
258 	int ret = 0;
259 	int k;
260 
261 	freq_table_size *= (nr_divs + 1);
262 	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
263 	if (!freq_table) {
264 		pr_err("sh_clk_div6_register: unable to alloc memory\n");
265 		return -ENOMEM;
266 	}
267 
268 	for (k = 0; !ret && (k < nr); k++) {
269 		clkp = clks + k;
270 
271 		clkp->ops = ops;
272 		clkp->priv = table;
273 		clkp->freq_table = freq_table + (k * freq_table_size);
274 		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
275 		ret = clk_register(clkp);
276 		if (ret < 0)
277 			break;
278 
279 		ret = sh_clk_init_parent(clkp);
280 	}
281 
282 	return ret;
283 }
284 
/* Register @nr fixed-parent DIV6 clocks. */
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
}
289 
/* Register @nr DIV6 clocks that support runtime parent selection. */
int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr,
					&sh_clk_div6_reparent_clk_ops);
}
295 
296 /*
297  * div4 support
298  */
299 static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
300 {
301 	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
302 	u32 value;
303 	int ret;
304 
305 	/* we really need a better way to determine parent index, but for
306 	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
307 	 * no CLK_ENABLE_ON_INIT means external clock...
308 	 */
309 
310 	if (parent->flags & CLK_ENABLE_ON_INIT)
311 		value = sh_clk_read(clk) & ~(1 << 7);
312 	else
313 		value = sh_clk_read(clk) | (1 << 7);
314 
315 	ret = clk_reparent(clk, parent);
316 	if (ret < 0)
317 		return ret;
318 
319 	sh_clk_write(value, clk);
320 
321 	/* Rebiuld the frequency table */
322 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
323 			     table, &clk->arch_flags);
324 
325 	return 0;
326 }
327 
/* Enable a DIV4 clock by clearing its stop bit (bit 8). */
static int sh_clk_div4_enable(struct clk *clk)
{
	unsigned int value;

	value = sh_clk_read(clk);
	value &= ~(1 << 8);
	sh_clk_write(value, clk);

	return 0;
}
333 
/* Disable a DIV4 clock by setting its stop bit (bit 8). */
static void sh_clk_div4_disable(struct clk *clk)
{
	unsigned int value;

	value = sh_clk_read(clk);
	value |= 1 << 8;
	sh_clk_write(value, clk);
}
338 
/* Always-on DIV4 clocks: rate control only, no gating. */
static struct sh_clk_ops sh_clk_div4_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};
344 
/* DIV4 clocks that can additionally be gated via the stop bit. */
static struct sh_clk_ops sh_clk_div4_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
};
352 
/* DIV4 clocks that support gating and runtime parent selection. */
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
361 
362 static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
363 			struct clk_div4_table *table, struct sh_clk_ops *ops)
364 {
365 	struct clk *clkp;
366 	void *freq_table;
367 	int nr_divs = table->div_mult_table->nr_divisors;
368 	int freq_table_size = sizeof(struct cpufreq_frequency_table);
369 	int ret = 0;
370 	int k;
371 
372 	freq_table_size *= (nr_divs + 1);
373 	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
374 	if (!freq_table) {
375 		pr_err("sh_clk_div4_register: unable to alloc memory\n");
376 		return -ENOMEM;
377 	}
378 
379 	for (k = 0; !ret && (k < nr); k++) {
380 		clkp = clks + k;
381 
382 		clkp->ops = ops;
383 		clkp->priv = table;
384 
385 		clkp->freq_table = freq_table + (k * freq_table_size);
386 		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
387 
388 		ret = clk_register(clkp);
389 	}
390 
391 	return ret;
392 }
393 
/* Register @nr always-on DIV4 clocks. */
int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
}
399 
/* Register @nr gateable DIV4 clocks. */
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_enable_clk_ops);
}
406 
/* Register @nr gateable DIV4 clocks with runtime parent selection. */
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_reparent_clk_ops);
}
413