xref: /linux/drivers/sh/clk/cpg.c (revision 764f4e4e33d18cde4dcaf8a0d860b749c6d6d08b)
1 /*
2  * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
3  *
4  *  Copyright (C) 2010  Magnus Damm
5  *  Copyright (C) 2010 - 2012  Paul Mundt
6  *
7  * This file is subject to the terms and conditions of the GNU General Public
8  * License.  See the file "COPYING" in the main directory of this archive
9  * for more details.
10  */
11 #include <linux/clk.h>
12 #include <linux/compiler.h>
13 #include <linux/slab.h>
14 #include <linux/io.h>
15 #include <linux/sh_clk.h>
16 
17 #define CPG_CKSTP_BIT	BIT(8)
18 
19 static unsigned int sh_clk_read(struct clk *clk)
20 {
21 	if (clk->flags & CLK_ENABLE_REG_8BIT)
22 		return ioread8(clk->mapped_reg);
23 	else if (clk->flags & CLK_ENABLE_REG_16BIT)
24 		return ioread16(clk->mapped_reg);
25 
26 	return ioread32(clk->mapped_reg);
27 }
28 
29 static void sh_clk_write(int value, struct clk *clk)
30 {
31 	if (clk->flags & CLK_ENABLE_REG_8BIT)
32 		iowrite8(value, clk->mapped_reg);
33 	else if (clk->flags & CLK_ENABLE_REG_16BIT)
34 		iowrite16(value, clk->mapped_reg);
35 	else
36 		iowrite32(value, clk->mapped_reg);
37 }
38 
39 static int sh_clk_mstp_enable(struct clk *clk)
40 {
41 	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
42 	return 0;
43 }
44 
45 static void sh_clk_mstp_disable(struct clk *clk)
46 {
47 	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
48 }
49 
/* Ops for MSTP gate clocks: gate/ungate only, rate follows the parent. */
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};
55 
56 int __init sh_clk_mstp_register(struct clk *clks, int nr)
57 {
58 	struct clk *clkp;
59 	int ret = 0;
60 	int k;
61 
62 	for (k = 0; !ret && (k < nr); k++) {
63 		clkp = clks + k;
64 		clkp->ops = &sh_clk_mstp_clk_ops;
65 		ret |= clk_register(clkp);
66 	}
67 
68 	return ret;
69 }
70 
71 /*
72  * Div/mult table lookup helpers
73  */
74 static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
75 {
76 	return clk->priv;
77 }
78 
79 static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
80 {
81 	return clk_to_div_table(clk)->div_mult_table;
82 }
83 
84 /*
85  * Common div ops
86  */
/*
 * Common round_rate op for div4/div6 clocks: delegate to the rate-table
 * helper against the clock's precomputed frequency table.
 */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
91 
92 static unsigned long sh_clk_div_recalc(struct clk *clk)
93 {
94 	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
95 	unsigned int idx;
96 
97 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
98 			     table, clk->arch_flags ? &clk->arch_flags : NULL);
99 
100 	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;
101 
102 	return clk->freq_table[idx].frequency;
103 }
104 
105 static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
106 {
107 	struct clk_div_table *dt = clk_to_div_table(clk);
108 	unsigned long value;
109 	int idx;
110 
111 	idx = clk_rate_table_find(clk, clk->freq_table, rate);
112 	if (idx < 0)
113 		return idx;
114 
115 	value = sh_clk_read(clk);
116 	value &= ~(clk->div_mask << clk->enable_bit);
117 	value |= (idx << clk->enable_bit);
118 	sh_clk_write(value, clk);
119 
120 	/* XXX: Should use a post-change notifier */
121 	if (dt->kick)
122 		dt->kick(clk);
123 
124 	return 0;
125 }
126 
127 static int sh_clk_div_enable(struct clk *clk)
128 {
129 	sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
130 	return 0;
131 }
132 
133 static void sh_clk_div_disable(struct clk *clk)
134 {
135 	unsigned int val;
136 
137 	val = sh_clk_read(clk);
138 	val |= CPG_CKSTP_BIT;
139 
140 	/*
141 	 * div6 clocks require the divisor field to be non-zero or the
142 	 * above CKSTP toggle silently fails. Ensure that the divisor
143 	 * array is reset to its initial state on disable.
144 	 */
145 	if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
146 		val |= clk->div_mask;
147 
148 	sh_clk_write(val, clk);
149 }
150 
151 /*
152  * div6 support
153  */
/* div6: 6-bit divisor field; index n selects a linear divide-by-(n + 1). */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
160 
/* Shared divisor table for all div6 clocks (no multipliers). */
static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
165 
/* Wrapper installed as clk->priv for every div6 clock (no kick callback). */
static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table	= &div6_div_mult_table,
};
169 
170 static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
171 {
172 	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
173 	u32 value;
174 	int ret, i;
175 
176 	if (!clk->parent_table || !clk->parent_num)
177 		return -EINVAL;
178 
179 	/* Search the parent */
180 	for (i = 0; i < clk->parent_num; i++)
181 		if (clk->parent_table[i] == parent)
182 			break;
183 
184 	if (i == clk->parent_num)
185 		return -ENODEV;
186 
187 	ret = clk_reparent(clk, parent);
188 	if (ret < 0)
189 		return ret;
190 
191 	value = sh_clk_read(clk) &
192 		~(((1 << clk->src_width) - 1) << clk->src_shift);
193 
194 	sh_clk_write(value | (i << clk->src_shift), clk);
195 
196 	/* Rebuild the frequency table */
197 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
198 			     table, NULL);
199 
200 	return 0;
201 }
202 
/* Ops for plain div6 clocks: divider control plus CKSTP gating. */
static struct sh_clk_ops sh_clk_div6_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};
210 
/* As sh_clk_div6_clk_ops, plus runtime parent selection. */
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
219 
220 static int __init sh_clk_init_parent(struct clk *clk)
221 {
222 	u32 val;
223 
224 	if (clk->parent)
225 		return 0;
226 
227 	if (!clk->parent_table || !clk->parent_num)
228 		return 0;
229 
230 	if (!clk->src_width) {
231 		pr_err("sh_clk_init_parent: cannot select parent clock\n");
232 		return -EINVAL;
233 	}
234 
235 	val  = (sh_clk_read(clk) >> clk->src_shift);
236 	val &= (1 << clk->src_width) - 1;
237 
238 	if (val >= clk->parent_num) {
239 		pr_err("sh_clk_init_parent: parent table size failed\n");
240 		return -EINVAL;
241 	}
242 
243 	clk_reparent(clk, clk->parent_table[val]);
244 	if (!clk->parent) {
245 		pr_err("sh_clk_init_parent: unable to set parent");
246 		return -EINVAL;
247 	}
248 
249 	return 0;
250 }
251 
252 static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
253 					   struct sh_clk_ops *ops)
254 {
255 	struct clk *clkp;
256 	void *freq_table;
257 	struct clk_div_table *table = &sh_clk_div6_table;
258 	int nr_divs = table->div_mult_table->nr_divisors;
259 	int freq_table_size = sizeof(struct cpufreq_frequency_table);
260 	int ret = 0;
261 	int k;
262 
263 	freq_table_size *= (nr_divs + 1);
264 	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
265 	if (!freq_table) {
266 		pr_err("sh_clk_div6_register: unable to alloc memory\n");
267 		return -ENOMEM;
268 	}
269 
270 	for (k = 0; !ret && (k < nr); k++) {
271 		clkp = clks + k;
272 
273 		clkp->ops = ops;
274 		clkp->priv = table;
275 		clkp->freq_table = freq_table + (k * freq_table_size);
276 		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
277 		ret = clk_register(clkp);
278 		if (ret < 0)
279 			break;
280 
281 		ret = sh_clk_init_parent(clkp);
282 	}
283 
284 	return ret;
285 }
286 
287 int __init sh_clk_div6_register(struct clk *clks, int nr)
288 {
289 	return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
290 }
291 
292 int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
293 {
294 	return sh_clk_div6_register_ops(clks, nr,
295 					&sh_clk_div6_reparent_clk_ops);
296 }
297 
298 /*
299  * div4 support
300  */
/*
 * set_parent op for reparentable div4 clocks. Parent selection is a
 * single register bit (bit 7); see the heuristic note below for how the
 * bit value is derived from the parent's flags.
 *
 * Returns a clk_reparent() error, or 0 on success.
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}
329 
/* Ops for always-on div4 clocks: divider control, no gating. */
static struct sh_clk_ops sh_clk_div4_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};
335 
/* As sh_clk_div4_clk_ops, plus CKSTP-based gating. */
static struct sh_clk_ops sh_clk_div4_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};
343 
/* As sh_clk_div4_enable_clk_ops, plus runtime parent selection. */
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
352 
353 static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
354 			struct clk_div4_table *table, struct sh_clk_ops *ops)
355 {
356 	struct clk *clkp;
357 	void *freq_table;
358 	int nr_divs = table->div_mult_table->nr_divisors;
359 	int freq_table_size = sizeof(struct cpufreq_frequency_table);
360 	int ret = 0;
361 	int k;
362 
363 	freq_table_size *= (nr_divs + 1);
364 	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
365 	if (!freq_table) {
366 		pr_err("sh_clk_div4_register: unable to alloc memory\n");
367 		return -ENOMEM;
368 	}
369 
370 	for (k = 0; !ret && (k < nr); k++) {
371 		clkp = clks + k;
372 
373 		clkp->ops = ops;
374 		clkp->priv = table;
375 
376 		clkp->freq_table = freq_table + (k * freq_table_size);
377 		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
378 
379 		ret = clk_register(clkp);
380 	}
381 
382 	return ret;
383 }
384 
385 int __init sh_clk_div4_register(struct clk *clks, int nr,
386 				struct clk_div4_table *table)
387 {
388 	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
389 }
390 
391 int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
392 				struct clk_div4_table *table)
393 {
394 	return sh_clk_div4_register_ops(clks, nr, table,
395 					&sh_clk_div4_enable_clk_ops);
396 }
397 
398 int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
399 				struct clk_div4_table *table)
400 {
401 	return sh_clk_div4_register_ops(clks, nr, table,
402 					&sh_clk_div4_reparent_clk_ops);
403 }
404