xref: /linux/arch/sh/kernel/cpu/clock.c (revision ae891a4264c91246c0b4c22be68b9838747ae48d)
1 /*
2  * arch/sh/kernel/cpu/clock.c - SuperH clock framework
3  *
4  *  Copyright (C) 2005 - 2009  Paul Mundt
5  *
6  * This clock framework is derived from the OMAP version by:
7  *
8  *	Copyright (C) 2004 - 2008 Nokia Corporation
9  *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
10  *
11  *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
12  *
13  * This file is subject to the terms and conditions of the GNU General Public
14  * License.  See the file "COPYING" in the main directory of this archive
15  * for more details.
16  */
17 #include <linux/kernel.h>
18 #include <linux/init.h>
19 #include <linux/module.h>
20 #include <linux/mutex.h>
21 #include <linux/list.h>
22 #include <linux/kobject.h>
23 #include <linux/sysdev.h>
24 #include <linux/seq_file.h>
25 #include <linux/err.h>
26 #include <linux/platform_device.h>
27 #include <linux/proc_fs.h>
28 #include <asm/clock.h>
29 #include <asm/timer.h>
30 
/* All registered clocks, walked under clock_list_sem. */
static LIST_HEAD(clock_list);
/* Protects rate/usecount updates; always taken with IRQs disabled. */
static DEFINE_SPINLOCK(clock_lock);
/* Serializes registration, unregistration and clk_get() lookups. */
static DEFINE_MUTEX(clock_list_sem);
34 
/*
 * Each subtype is expected to define the init routines for these clocks,
 * as each subtype (or processor family) will have these clocks at the
 * very least. These are all provided through the CPG, which even some of
 * the more quirky parts (such as ST40, SH4-202, etc.) still have.
 *
 * The processor-specific code is expected to register any additional
 * clock sources that are of interest.
 */

/* Root of the clock tree; its rate is the board-configured peripheral
 * clock frequency. ops are filled in later by arch_init_clk_ops(). */
static struct clk master_clk = {
	.name		= "master_clk",
	.flags		= CLK_ENABLE_ON_INIT,
	.rate		= CONFIG_SH_PCLK_FREQ,
};

/* Peripheral module clock, derived from master_clk. */
static struct clk module_clk = {
	.name		= "module_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

/* External bus clock, derived from master_clk. */
static struct clk bus_clk = {
	.name		= "bus_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

/* CPU core clock, derived from master_clk. */
static struct clk cpu_clk = {
	.name		= "cpu_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

/*
 * The ordering of these clocks matters, do not change it.
 * The array index is passed to arch_init_clk_ops() as the clock type
 * in clk_init(), so position is part of the contract.
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&module_clk,
	&bus_clk,
	&cpu_clk,
};
77 
78 /* Used for clocks that always have same value as the parent clock */
79 unsigned long followparent_recalc(struct clk *clk)
80 {
81 	return clk->parent->rate;
82 }
83 
84 /* Propagate rate to children */
85 void propagate_rate(struct clk *tclk)
86 {
87 	struct clk *clkp;
88 
89 	list_for_each_entry(clkp, &tclk->children, sibling) {
90 		if (clkp->ops->recalc)
91 			clkp->rate = clkp->ops->recalc(clkp);
92 		propagate_rate(clkp);
93 	}
94 }
95 
96 static void __clk_disable(struct clk *clk)
97 {
98 	if (clk->usecount == 0) {
99 		printk(KERN_ERR "Trying disable clock %s with 0 usecount\n",
100 		       clk->name);
101 		WARN_ON(1);
102 		return;
103 	}
104 
105 	if (!(--clk->usecount)) {
106 		if (likely(clk->ops && clk->ops->disable))
107 			clk->ops->disable(clk);
108 		if (likely(clk->parent))
109 			__clk_disable(clk->parent);
110 	}
111 }
112 
113 void clk_disable(struct clk *clk)
114 {
115 	unsigned long flags;
116 
117 	if (!clk)
118 		return;
119 
120 	spin_lock_irqsave(&clock_lock, flags);
121 	__clk_disable(clk);
122 	spin_unlock_irqrestore(&clock_lock, flags);
123 }
124 EXPORT_SYMBOL_GPL(clk_disable);
125 
126 static int __clk_enable(struct clk *clk)
127 {
128 	int ret = 0;
129 
130 	if (clk->usecount++ == 0) {
131 		if (clk->parent) {
132 			ret = __clk_enable(clk->parent);
133 			if (unlikely(ret))
134 				goto err;
135 		}
136 
137 		if (clk->ops && clk->ops->enable) {
138 			ret = clk->ops->enable(clk);
139 			if (ret) {
140 				if (clk->parent)
141 					__clk_disable(clk->parent);
142 				goto err;
143 			}
144 		}
145 	}
146 
147 	return ret;
148 err:
149 	clk->usecount--;
150 	return ret;
151 }
152 
153 int clk_enable(struct clk *clk)
154 {
155 	unsigned long flags;
156 	int ret;
157 
158 	if (!clk)
159 		return -EINVAL;
160 
161 	spin_lock_irqsave(&clock_lock, flags);
162 	ret = __clk_enable(clk);
163 	spin_unlock_irqrestore(&clock_lock, flags);
164 
165 	return ret;
166 }
167 EXPORT_SYMBOL_GPL(clk_enable);
168 
169 static LIST_HEAD(root_clks);
170 
171 /**
172  * recalculate_root_clocks - recalculate and propagate all root clocks
173  *
174  * Recalculates all root clocks (clocks with no parent), which if the
175  * clock's .recalc is set correctly, should also propagate their rates.
176  * Called at init.
177  */
178 void recalculate_root_clocks(void)
179 {
180 	struct clk *clkp;
181 
182 	list_for_each_entry(clkp, &root_clks, sibling) {
183 		if (clkp->ops->recalc)
184 			clkp->rate = clkp->ops->recalc(clkp);
185 		propagate_rate(clkp);
186 	}
187 }
188 
189 int clk_register(struct clk *clk)
190 {
191 	if (clk == NULL || IS_ERR(clk))
192 		return -EINVAL;
193 
194 	/*
195 	 * trap out already registered clocks
196 	 */
197 	if (clk->node.next || clk->node.prev)
198 		return 0;
199 
200 	mutex_lock(&clock_list_sem);
201 
202 	INIT_LIST_HEAD(&clk->children);
203 	clk->usecount = 0;
204 
205 	if (clk->parent)
206 		list_add(&clk->sibling, &clk->parent->children);
207 	else
208 		list_add(&clk->sibling, &root_clks);
209 
210 	list_add(&clk->node, &clock_list);
211 	if (clk->ops->init)
212 		clk->ops->init(clk);
213 	mutex_unlock(&clock_list_sem);
214 
215 	return 0;
216 }
217 EXPORT_SYMBOL_GPL(clk_register);
218 
/*
 * clk_unregister - remove a clock from the framework
 * @clk: clock to drop; must have been registered via clk_register()
 *
 * Unlinks the clock from its sibling list and the global clock list.
 * NOTE(review): the node pointers are not reset, and any children are
 * left dangling — callers are expected to unregister leaves first.
 */
void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);
227 
228 static void clk_enable_init_clocks(void)
229 {
230 	struct clk *clkp;
231 
232 	list_for_each_entry(clkp, &clock_list, node)
233 		if (clkp->flags & CLK_ENABLE_ON_INIT)
234 			clk_enable(clkp);
235 }
236 
/* Return the cached rate in Hz; no hardware access, no locking. */
unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
242 
/* Standard clk API entry point; delegates with the default algo id (0). */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);
248 
249 int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
250 {
251 	int ret = -EOPNOTSUPP;
252 
253 	if (likely(clk->ops && clk->ops->set_rate)) {
254 		unsigned long flags;
255 
256 		spin_lock_irqsave(&clock_lock, flags);
257 		ret = clk->ops->set_rate(clk, rate, algo_id);
258 		if (ret == 0) {
259 			if (clk->ops->recalc)
260 				clk->rate = clk->ops->recalc(clk);
261 			propagate_rate(clk);
262 		}
263 		spin_unlock_irqrestore(&clock_lock, flags);
264 	}
265 
266 	return ret;
267 }
268 EXPORT_SYMBOL_GPL(clk_set_rate_ex);
269 
270 void clk_recalc_rate(struct clk *clk)
271 {
272 	unsigned long flags;
273 
274 	if (!clk->ops->recalc)
275 		return;
276 
277 	spin_lock_irqsave(&clock_lock, flags);
278 	clk->rate = clk->ops->recalc(clk);
279 	propagate_rate(clk);
280 	spin_unlock_irqrestore(&clock_lock, flags);
281 }
282 EXPORT_SYMBOL_GPL(clk_recalc_rate);
283 
284 int clk_set_parent(struct clk *clk, struct clk *parent)
285 {
286 	unsigned long flags;
287 	int ret = -EINVAL;
288 
289 	if (!parent || !clk)
290 		return ret;
291 
292 	spin_lock_irqsave(&clock_lock, flags);
293 	if (clk->usecount == 0) {
294 		if (clk->ops->set_parent)
295 			ret = clk->ops->set_parent(clk, parent);
296 		if (ret == 0) {
297 			if (clk->ops->recalc)
298 				clk->rate = clk->ops->recalc(clk);
299 			propagate_rate(clk);
300 		}
301 	} else
302 		ret = -EBUSY;
303 	spin_unlock_irqrestore(&clock_lock, flags);
304 
305 	return ret;
306 }
307 EXPORT_SYMBOL_GPL(clk_set_parent);
308 
/* Return the parent clock, or NULL for a root clock. */
struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
314 
315 long clk_round_rate(struct clk *clk, unsigned long rate)
316 {
317 	if (likely(clk->ops && clk->ops->round_rate)) {
318 		unsigned long flags, rounded;
319 
320 		spin_lock_irqsave(&clock_lock, flags);
321 		rounded = clk->ops->round_rate(clk, rate);
322 		spin_unlock_irqrestore(&clock_lock, flags);
323 
324 		return rounded;
325 	}
326 
327 	return clk_get_rate(clk);
328 }
329 EXPORT_SYMBOL_GPL(clk_round_rate);
330 
/*
 * Returns a clock. Note that we first try to use device id on the bus
 * and clock name. If this fails, we try to use clock name only.
 * On success the owning module's refcount is taken (dropped again by
 * clk_put()); on failure ERR_PTR(-ENOENT) is returned.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
	struct clk *p, *clk = ERR_PTR(-ENOENT);
	int idno;

	/* Only platform devices carry a usable numeric id; anything else
	 * (including a NULL dev) falls back to a name-only lookup. */
	if (dev == NULL || dev->bus != &platform_bus_type)
		idno = -1;
	else
		idno = to_platform_device(dev)->id;

	mutex_lock(&clock_list_sem);
	/* First pass: exact match on both device id and clock name. */
	list_for_each_entry(p, &clock_list, node) {
		if (p->id == idno &&
		    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			goto found;
		}
	}

	/* Second pass: match on name alone. */
	list_for_each_entry(p, &clock_list, node) {
		if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			break;
		}
	}

found:
	mutex_unlock(&clock_list_sem);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_get);
367 
368 void clk_put(struct clk *clk)
369 {
370 	if (clk && !IS_ERR(clk))
371 		module_put(clk->owner);
372 }
373 EXPORT_SYMBOL_GPL(clk_put);
374 
375 void __init __attribute__ ((weak))
376 arch_init_clk_ops(struct clk_ops **ops, int type)
377 {
378 }
379 
/* Weak default: CPU subtype code overrides this to register any extra
 * clock sources. Returns 0 (success) when not overridden. */
int __init __attribute__ ((weak))
arch_clk_init(void)
{
	return 0;
}
385 
386 static int show_clocks(char *buf, char **start, off_t off,
387 		       int len, int *eof, void *data)
388 {
389 	struct clk *clk;
390 	char *p = buf;
391 
392 	list_for_each_entry_reverse(clk, &clock_list, node) {
393 		unsigned long rate = clk_get_rate(clk);
394 
395 		p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name,
396 			     rate / 1000000, (rate % 1000000) / 10000,
397 			      (clk->usecount > 0) ?  "enabled" : "disabled");
398 	}
399 
400 	return p - buf;
401 }
402 
403 #ifdef CONFIG_PM
/*
 * Suspend/resume hook for the clock framework. Invoked for both the
 * suspend path (FREEZE/SUSPEND events) and, via clks_sysdev_resume(),
 * the resume path (PM_EVENT_ON). The previous event is remembered so
 * that hardware state is only rewritten when coming back from
 * hibernation, where register contents were lost.
 */
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;	/* event seen on the previous call */
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		/* Re-program every clock: restore parent first, then the
		 * saved rate; fall back to recalc when no set_rate op. */
		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp,
						rate, NO_CHANGE);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}
439 
/* Resume is modelled as a PM_EVENT_ON "suspend" call; the shared handler
 * decides whether hardware state must be restored. */
static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}
444 
/* Sysdev plumbing: exposes the clock framework as /sys/devices/system/clks
 * so the PM core invokes the suspend/resume hooks above. */
static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};
457 
458 static int __init clk_sysdev_init(void)
459 {
460 	sysdev_class_register(&clks_sysdev_class);
461 	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
462 	sysdev_register(&clks_sysdev_dev);
463 
464 	return 0;
465 }
466 subsys_initcall(clk_sysdev_init);
467 #endif
468 
/*
 * clk_init - bring up the core clock framework at boot
 *
 * Installs per-subtype clk_ops on each of the four onchip clocks (the
 * loop index doubles as the clock "type" handed to arch_init_clk_ops(),
 * which is why onchip_clocks[] ordering must not change), registers
 * them, lets the subtype register its extra clocks, then recalculates
 * all rates and enables the CLK_ENABLE_ON_INIT set. Returns the OR of
 * all registration results (0 on full success).
 */
int __init clk_init(void)
{
	int i, ret = 0;

	/* master_clk.rate comes from CONFIG_SH_PCLK_FREQ; a zero rate
	 * would poison every derived clock, so fail loudly. */
	BUG_ON(!master_clk.rate);

	for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
		struct clk *clk = onchip_clocks[i];

		arch_init_clk_ops(&clk->ops, i);
		ret |= clk_register(clk);
	}

	ret |= arch_clk_init();

	/* Kick the child clocks.. */
	recalculate_root_clocks();

	/* Enable the necessary init clocks */
	clk_enable_init_clocks();

	return ret;
}
492 
493 static int __init clk_proc_init(void)
494 {
495 	struct proc_dir_entry *p;
496 	p = create_proc_read_entry("clocks", S_IRUSR, NULL,
497 				   show_clocks, NULL);
498 	if (unlikely(!p))
499 		return -EINVAL;
500 
501 	return 0;
502 }
503 subsys_initcall(clk_proc_init);
504