xref: /linux/arch/sh/kernel/cpu/clock.c (revision aa87aa343f2cd236b5eccd643abd4df918ed5c4f)
/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 *  Copyright (C) 2005 - 2009  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <asm/clock.h>
#include <asm/timer.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/*
 * Each subtype is expected to define the init routines for these clocks,
 * as each subtype (or processor family) will have these clocks at the
 * very least. These are all provided through the CPG, which even some of
 * the more quirky parts (such as ST40, SH4-202, etc.) still have.
 *
 * The processor-specific code is expected to register any additional
 * clock sources that are of interest.
 */
static struct clk master_clk = {
	.name		= "master_clk",
	.flags		= CLK_ENABLE_ON_INIT,
	.rate		= CONFIG_SH_PCLK_FREQ,
};

static struct clk module_clk = {
	.name		= "module_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

static struct clk bus_clk = {
	.name		= "bus_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

static struct clk cpu_clk = {
	.name		= "cpu_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

/*
 * The ordering of these clocks matters; do not change it.
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&module_clk,
	&bus_clk,
	&cpu_clk,
};

/* Used for clocks that always have the same value as their parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent->rate;
}

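/*
 * Illustrative sketch (not part of this file): a derived clock that simply
 * tracks its parent can plug followparent_recalc() straight into its
 * ->recalc op. The "follower_clk_ops" and "peripheral_clk" names are
 * hypothetical.
 *
 *	static struct clk_ops follower_clk_ops = {
 *		.recalc		= followparent_recalc,
 *	};
 *
 *	static struct clk peripheral_clk = {
 *		.name		= "peripheral_clk",
 *		.parent		= &bus_clk,
 *		.ops		= &follower_clk_ops,
 *	};
 */
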
int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/* now do the debugfs renaming to reattach the child
	   to the proper parent */

	return 0;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}

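/*
 * Illustrative sketch: with the on-chip clocks registered below in
 * clk_init(), a recalc at the root recurses depth-first, e.g.
 *
 *	master_clk -> { module_clk, bus_clk, cpu_clk }
 *
 * with each child's rate recomputed via ->recalc before its own
 * children are visited.
 */
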
static void __clk_disable(struct clk *clk)
{
	if (clk->usecount == 0) {
		printk(KERN_ERR "Trying to disable clock %s with 0 usecount\n",
		       clk->name);
		WARN_ON(1);
		return;
	}

	if (!(--clk->usecount)) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
		if (likely(clk->parent))
			__clk_disable(clk->parent);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
	}

	return ret;
err:
	clk->usecount--;
	return ret;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

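/*
 * Illustrative sketch (not part of this file): clk_enable()/clk_disable()
 * are refcounted; the ->enable/->disable ops (and the parent chain) are
 * only touched on the 0 <-> 1 usecount transitions, so nested calls are
 * safe. The "clk" below is assumed to come from clk_get().
 *
 *	ret = clk_enable(clk);
 *	if (ret)
 *		return ret;
 *	...
 *	clk_disable(clk);
 */
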
static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which, provided
 * the clock's .recalc is set correctly, should also propagate their
 * rates. Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}

int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clock_list_sem);

	INIT_LIST_HEAD(&clk->children);
	clk->usecount = 0;

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clock_list);
	if (clk->ops && clk->ops->init)
		clk->ops->init(clk);
	mutex_unlock(&clock_list_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(clk_register);

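/*
 * Illustrative sketch (not part of this file): a CPU or board file would
 * register an additional clock along these lines. All "my_*" names are
 * hypothetical; only clk_register() itself is this file's API.
 *
 *	static struct clk_ops my_clk_ops = {
 *		.enable		= my_clk_enable,
 *		.disable	= my_clk_disable,
 *		.recalc		= followparent_recalc,
 *	};
 *
 *	static struct clk my_clk = {
 *		.name		= "my_clk",
 *		.parent		= &my_parent_clk,
 *		.ops		= &my_clk_ops,
 *	};
 *
 *	ret = clk_register(&my_clk);
 */
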
void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

static void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node)
		if (clkp->flags & CLK_ENABLE_ON_INIT)
			clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
	int ret = -EOPNOTSUPP;

	if (likely(clk->ops && clk->ops->set_rate)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		ret = clk->ops->set_rate(clk, rate, algo_id);
		if (ret == 0) {
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			propagate_rate(clk);
		}
		spin_unlock_irqrestore(&clock_lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);

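/*
 * Illustrative sketch (not part of this file): callers typically round a
 * requested rate first so the subsequent set uses a value the hardware
 * can actually generate. "target" is hypothetical.
 *
 *	long rounded = clk_round_rate(clk, target);
 *
 *	if (rounded > 0)
 *		ret = clk_set_rate(clk, rounded);
 */
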
void clk_recalc_rate(struct clk *clk)
{
	unsigned long flags;

	if (!clk->ops || !clk->ops->recalc)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	clk->rate = clk->ops->recalc(clk);
	propagate_rate(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_recalc_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (!parent || !clk)
		return ret;
	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clock_lock, flags);
	if (clk->usecount == 0) {
		if (clk->ops && clk->ops->set_parent)
			ret = clk->ops->set_parent(clk, parent);
		else
			ret = clk_reparent(clk, parent);

		if (ret == 0) {
			pr_debug("clock: set parent of %s to %s (new rate %lu)\n",
				 clk->name, clk->parent->name, clk->rate);
			if (clk->ops && clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

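/*
 * Illustrative sketch (not part of this file): reparenting is refused
 * with -EBUSY while the clock is in use, so a caller drops its enable
 * count first. "new_parent" is hypothetical.
 *
 *	clk_disable(clk);
 *	ret = clk_set_parent(clk, new_parent);
 *	if (!ret)
 *		ret = clk_enable(clk);
 */
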
struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/*
 * Returns a clock. We first try to match on both the platform device id
 * and the clock name; if that fails, we fall back to matching on the
 * clock name alone.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
	struct clk *p, *clk = ERR_PTR(-ENOENT);
	int idno;

	if (dev == NULL || dev->bus != &platform_bus_type)
		idno = -1;
	else
		idno = to_platform_device(dev)->id;

	mutex_lock(&clock_list_sem);
	list_for_each_entry(p, &clock_list, node) {
		if (p->id == idno &&
		    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			goto found;
		}
	}

	list_for_each_entry(p, &clock_list, node) {
		if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			break;
		}
	}

found:
	mutex_unlock(&clock_list_sem);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_get);

void clk_put(struct clk *clk)
{
	if (clk && !IS_ERR(clk))
		module_put(clk->owner);
}
EXPORT_SYMBOL_GPL(clk_put);

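/*
 * Illustrative sketch (not part of this file): a platform driver brackets
 * its use of a clock with clk_get()/clk_put(). "module_clk" is one of the
 * clocks registered above; the device and error handling are hypothetical.
 *
 *	struct clk *clk = clk_get(&pdev->dev, "module_clk");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	ret = clk_enable(clk);
 *	...
 *	clk_disable(clk);
 *	clk_put(clk);
 */
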
void __init __attribute__ ((weak))
arch_init_clk_ops(struct clk_ops **ops, int type)
{
}

int __init __attribute__ ((weak))
arch_clk_init(void)
{
	return 0;
}

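/*
 * Illustrative sketch (not part of this file): a CPU family overrides the
 * weak arch_init_clk_ops() to hand back an ops table for each entry in
 * onchip_clocks[], keyed by the index that clk_init() passes down. The
 * "sh_*_clk_ops" names are hypothetical.
 *
 *	void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
 *	{
 *		static struct clk_ops *cpg_clk_ops[] = {
 *			&sh_master_clk_ops,
 *			&sh_module_clk_ops,
 *			&sh_bus_clk_ops,
 *			&sh_cpu_clk_ops,
 *		};
 *
 *		if (idx < ARRAY_SIZE(cpg_clk_ops))
 *			*ops = cpg_clk_ops[idx];
 *	}
 */
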
static int show_clocks(char *buf, char **start, off_t off,
		       int len, int *eof, void *data)
{
	struct clk *clk;
	char *p = buf;

	list_for_each_entry_reverse(clk, &clock_list, node) {
		unsigned long rate = clk_get_rate(clk);

		p += sprintf(p, "%-12s\t: %lu.%02luMHz\t%s\n", clk->name,
			     rate / 1000000, (rate % 1000000) / 10000,
			     (clk->usecount > 0) ? "enabled" : "disabled");
	}

	return p - buf;
}

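/*
 * Illustrative /proc/clocks output (rates are hypothetical; the format
 * and the reverse registration order match show_clocks() above):
 *
 *	master_clk  	: 33.33MHz	enabled
 *	module_clk  	: 33.33MHz	enabled
 *	bus_clk     	: 66.66MHz	enabled
 *	cpu_clk     	: 266.66MHz	enabled
 */
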
#ifdef CONFIG_PM
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp,
						rate, NO_CHANGE);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
#endif

int __init clk_init(void)
{
	int i, ret = 0;

	BUG_ON(!master_clk.rate);

	for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
		struct clk *clk = onchip_clocks[i];

		arch_init_clk_ops(&clk->ops, i);
		ret |= clk_register(clk);
	}

	ret |= arch_clk_init();

	/* Kick the child clocks.. */
	recalculate_root_clocks();

	/* Enable the necessary init clocks */
	clk_enable_init_clocks();

	return ret;
}

static int __init clk_proc_init(void)
{
	struct proc_dir_entry *p;
	p = create_proc_read_entry("clocks", S_IRUSR, NULL,
				   show_clocks, NULL);
	if (unlikely(!p))
		return -EINVAL;

	return 0;
}
subsys_initcall(clk_proc_init);