/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 *  Copyright (C) 2005, 2006, 2007  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2005 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <asm/clock.h>
#include <asm/timer.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/*
 * Each subtype is expected to define the init routines for these clocks,
 * as each subtype (or processor family) will have these clocks at the
 * very least. These are all provided through the CPG, which even some of
 * the more quirky parts (such as ST40, SH4-202, etc.) still have.
 *
 * The processor-specific code is expected to register any additional
 * clock sources that are of interest.
 */
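
/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * subtype typically supplies its clk_ops through arch_init_clk_ops(),
 * which clk_init() below hands the index of each entry in
 * onchip_clocks[], so idx 1 corresponds to module_clk:
 *
 *	static void my_module_clk_recalc(struct clk *clk)
 *	{
 *		clk->rate = clk->parent->rate / 4;
 *	}
 *
 *	static struct clk_ops my_module_clk_ops = {
 *		.recalc	= my_module_clk_recalc,
 *	};
 *
 *	void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
 *	{
 *		if (idx == 1)
 *			*ops = &my_module_clk_ops;
 *	}
 */
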
static struct clk master_clk = {
	.name		= "master_clk",
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
	.rate		= CONFIG_SH_PCLK_FREQ,
};

static struct clk module_clk = {
	.name		= "module_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
};

static struct clk bus_clk = {
	.name		= "bus_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
};

static struct clk cpu_clk = {
	.name		= "cpu_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED,
};

/*
 * The ordering of these clocks matters; do not change it.
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&module_clk,
	&bus_clk,
	&cpu_clk,
};

/*
 * Recalculate the rate of every child of @clk, recursing into any
 * child that itself propagates its rate.
 */
static void propagate_rate(struct clk *clk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node) {
		if (likely(clkp->parent != clk))
			continue;
		if (likely(clkp->ops && clkp->ops->recalc))
			clkp->ops->recalc(clkp);
		if (unlikely(clkp->flags & CLK_RATE_PROPAGATES))
			propagate_rate(clkp);
	}
}

static void __clk_init(struct clk *clk)
{
	/*
	 * See if this is the first time we're enabling the clock; some
	 * clocks that are always enabled still require "special"
	 * initialization. This is especially true if the clock mode
	 * changes and the clock needs to hunt for the proper set of
	 * divisors to use before it can effectively recalc.
	 */

	if (clk->flags & CLK_NEEDS_INIT) {
		if (clk->ops && clk->ops->init)
			clk->ops->init(clk);

		clk->flags &= ~CLK_NEEDS_INIT;
	}
}

/*
 * Must be called with clock_lock held; takes a reference on @clk and,
 * on the first enable of a gateable clock, initializes it and enables
 * its parent chain.
 */
static int __clk_enable(struct clk *clk)
{
	if (!clk)
		return -EINVAL;

	clk->usecount++;

	/* nothing to do if always enabled */
	if (clk->flags & CLK_ALWAYS_ENABLED)
		return 0;

	if (clk->usecount == 1) {
		__clk_init(clk);

		__clk_enable(clk->parent);

		if (clk->ops && clk->ops->enable)
			clk->ops->enable(clk);
	}

	return 0;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

/*
 * Must be called with clock_lock held; drops a reference on @clk and,
 * on the last disable of a gateable clock, gates it and releases its
 * parent chain.
 */
static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	clk->usecount--;

	WARN_ON(clk->usecount < 0);

	if (clk->flags & CLK_ALWAYS_ENABLED)
		return;

	if (clk->usecount == 0) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);

		__clk_disable(clk->parent);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

int clk_register(struct clk *clk)
{
	mutex_lock(&clock_list_sem);

	list_add(&clk->node, &clock_list);
	clk->usecount = 0;
	clk->flags |= CLK_NEEDS_INIT;

	mutex_unlock(&clock_list_sem);

	if (clk->flags & CLK_ALWAYS_ENABLED) {
		__clk_init(clk);
		pr_debug("Clock '%s' is ALWAYS_ENABLED\n", clk->name);
		if (clk->ops && clk->ops->enable)
			clk->ops->enable(clk);
		pr_debug("Enabled.\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clk_register);
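
/*
 * Illustrative sketch (hypothetical clock, not from this file): the
 * processor-specific arch_clk_init() would register an extra clock
 * source along these lines, parenting it by name lookup (error
 * handling elided for brevity):
 *
 *	static struct clk my_clk = {
 *		.name	= "my_clk",
 *		.ops	= &my_clk_ops,
 *	};
 *
 *	int __init arch_clk_init(void)
 *	{
 *		my_clk.parent = clk_get(NULL, "module_clk");
 *		return clk_register(&my_clk);
 *	}
 */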

void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->node);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
	int ret = -EOPNOTSUPP;

	if (likely(clk->ops && clk->ops->set_rate)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		ret = clk->ops->set_rate(clk, rate, algo_id);
		spin_unlock_irqrestore(&clock_lock, flags);
	}

	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
		propagate_rate(clk);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);

void clk_recalc_rate(struct clk *clk)
{
	if (likely(clk->ops && clk->ops->recalc)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		clk->ops->recalc(clk);
		spin_unlock_irqrestore(&clock_lock, flags);
	}

	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
		propagate_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_recalc_rate);
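
/*
 * For example (illustrative): if board code tweaks a divisor register
 * behind the framework's back, a subsequent
 *
 *	clk_recalc_rate(clk);
 *
 * resynchronizes clk->rate, and propagate_rate() then updates the
 * children if the clock has CLK_RATE_PROPAGATES set.
 */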

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = -EINVAL;
	struct clk *old;

	if (!parent || !clk)
		return ret;

	old = clk->parent;
	if (likely(clk->ops && clk->ops->set_parent)) {
		unsigned long flags;
		spin_lock_irqsave(&clock_lock, flags);
		ret = clk->ops->set_parent(clk, parent);
		spin_unlock_irqrestore(&clock_lock, flags);
		clk->parent = (ret ? old : parent);
	}

	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
		propagate_rate(clk);
	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);
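
/*
 * Typical usage (illustrative): ask for the closest supported rate
 * before committing to it, e.g. for a nominal 33.33MHz target:
 *
 *	long rounded = clk_round_rate(clk, 33333333);
 *
 *	if (rounded > 0)
 *		clk_set_rate(clk, rounded);
 */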

/*
 * Returns a clock. We first try to match on both the platform device
 * id and the clock name; if that fails, we fall back to matching on
 * the clock name alone.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
	struct clk *p, *clk = ERR_PTR(-ENOENT);
	int idno;

	if (dev == NULL || dev->bus != &platform_bus_type)
		idno = -1;
	else
		idno = to_platform_device(dev)->id;

	mutex_lock(&clock_list_sem);
	list_for_each_entry(p, &clock_list, node) {
		if (p->id == idno &&
		    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			goto found;
		}
	}

	list_for_each_entry(p, &clock_list, node) {
		if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			break;
		}
	}

found:
	mutex_unlock(&clock_list_sem);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_get);

void clk_put(struct clk *clk)
{
	if (clk && !IS_ERR(clk))
		module_put(clk->owner);
}
EXPORT_SYMBOL_GPL(clk_put);
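
/*
 * Typical consumer lifetime (illustrative; "sci" is a hypothetical
 * clock name, and error handling is abbreviated):
 *
 *	struct clk *clk = clk_get(dev, "sci");
 *
 *	if (!IS_ERR(clk)) {
 *		clk_enable(clk);
 *		...
 *		clk_disable(clk);
 *		clk_put(clk);
 *	}
 *
 * clk_enable()/clk_disable() calls nest; the clock is only gated when
 * its use count drops back to zero.
 */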

void __init __attribute__ ((weak))
arch_init_clk_ops(struct clk_ops **ops, int type)
{
}

int __init __attribute__ ((weak))
arch_clk_init(void)
{
	return 0;
}

static int show_clocks(char *buf, char **start, off_t off,
		       int len, int *eof, void *data)
{
	struct clk *clk;
	char *p = buf;

	list_for_each_entry_reverse(clk, &clock_list, node) {
		unsigned long rate = clk_get_rate(clk);

		p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name,
			     rate / 1000000, (rate % 1000000) / 10000,
			     ((clk->flags & CLK_ALWAYS_ENABLED) ||
			      clk->usecount > 0) ?
			     "enabled" : "disabled");
	}

	return p - buf;
}
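
/*
 * Each /proc/clocks line then looks like this (illustrative values):
 *
 *	master_clk  	: 33.33MHz	enabled
 *	module_clk  	: 33.33MHz	enabled
 *	bus_clk     	: 66.66MHz	enabled
 *	cpu_clk     	: 133.33MHz	enabled
 */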

#ifdef CONFIG_PM
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event == PM_EVENT_FREEZE) {
			list_for_each_entry(clkp, &clock_list, node)
				if (likely(clkp->ops)) {
					unsigned long rate = clkp->rate;

					if (likely(clkp->ops->set_parent))
						clkp->ops->set_parent(clkp,
							clkp->parent);
					if (likely(clkp->ops->set_rate))
						clkp->ops->set_rate(clkp,
							rate, NO_CHANGE);
					else if (likely(clkp->ops->recalc))
						clkp->ops->recalc(clkp);
				}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
#endif

int __init clk_init(void)
{
	int i, ret = 0;

	BUG_ON(!master_clk.rate);

	for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
		struct clk *clk = onchip_clocks[i];

		arch_init_clk_ops(&clk->ops, i);
		ret |= clk_register(clk);
	}

	ret |= arch_clk_init();

	/* Kick the child clocks.. */
	propagate_rate(&master_clk);
	propagate_rate(&bus_clk);

	return ret;
}

static int __init clk_proc_init(void)
{
	struct proc_dir_entry *p;
	p = create_proc_read_entry("clocks", S_IRUSR, NULL,
				   show_clocks, NULL);
	if (unlikely(!p))
		return -EINVAL;

	return 0;
}
subsys_initcall(clk_proc_init);