/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 *  Copyright (C) 2005 - 2009  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 *  With clkdev bits:
 *
 *	Copyright (C) 2008 Russell King.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <asm/clock.h>
#include <asm/timer.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/*
 * Each subtype is expected to define the init routines for these clocks,
 * as each subtype (or processor family) will have these clocks at the
 * very least. These are all provided through the CPG, which even some of
 * the more quirky parts (such as ST40, SH4-202, etc.) still have.
 *
 * The processor-specific code is expected to register any additional
 * clock sources that are of interest.
 */
static struct clk master_clk = {
	.name		= "master_clk",
	.flags		= CLK_ENABLE_ON_INIT,
	.rate		= CONFIG_SH_PCLK_FREQ,
};

static struct clk module_clk = {
	.name		= "module_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

static struct clk bus_clk = {
	.name		= "bus_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

static struct clk cpu_clk = {
	.name		= "cpu_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

/*
 * The ordering of these clocks matters, do not change it.
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&module_clk,
	&bus_clk,
	&cpu_clk,
};
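
/*
 * A sketch of why the ordering above matters: clk_init() below hands each
 * array index to arch_init_clk_ops(), so a CPU subtype typically indexes
 * its own ops table with it. The "my_*" names here are hypothetical:
 *
 *	static struct clk_ops *my_clk_ops[] = {
 *		&my_master_clk_ops,	// 0: master_clk
 *		&my_module_clk_ops,	// 1: module_clk
 *		&my_bus_clk_ops,	// 2: bus_clk
 *		&my_cpu_clk_ops,	// 3: cpu_clk
 *	};
 *
 *	void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
 *	{
 *		if (idx < ARRAY_SIZE(my_clk_ops))
 *			*ops = my_clk_ops[idx];
 *	}
 */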

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent->rate;
}
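
/*
 * Minimal usage sketch (hypothetical ops table): a clock that simply
 * tracks its parent's rate can point .recalc at the helper above, so
 * propagate_rate() keeps it in sync automatically:
 *
 *	static struct clk_ops my_follower_clk_ops = {
 *		.recalc	= followparent_recalc,
 *	};
 */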

int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/*
	 * now do the debugfs renaming to reattach the child
	 * to the proper parent
	 */

	return 0;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}

static void __clk_disable(struct clk *clk)
{
	if (clk->usecount == 0) {
		printk(KERN_ERR "Trying to disable clock %s with 0 usecount\n",
		       clk->name);
		WARN_ON(1);
		return;
	}

	if (!(--clk->usecount)) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
		if (likely(clk->parent))
			__clk_disable(clk->parent);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
	}

	return ret;
err:
	clk->usecount--;
	return ret;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which, if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}

int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clock_list_sem);

	INIT_LIST_HEAD(&clk->children);
	clk->usecount = 0;

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clock_list);
	if (clk->ops && clk->ops->init)
		clk->ops->init(clk);
	mutex_unlock(&clock_list_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(clk_register);
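
/*
 * Illustrative sketch of the processor-specific registration mentioned
 * at the top of this file; arch_clk_init() is a natural place for it.
 * All "my_*" names and the 27MHz rate are made up for the example:
 *
 *	static struct clk my_ext_clk = {
 *		.name	= "my_ext_clk",
 *		.rate	= 27000000,
 *		.ops	= &my_ext_clk_ops,
 *	};
 *
 *	int __init arch_clk_init(void)
 *	{
 *		return clk_register(&my_ext_clk);
 *	}
 */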

void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

static void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node)
		if (clkp->flags & CLK_ENABLE_ON_INIT)
			clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
	int ret = -EOPNOTSUPP;

	if (likely(clk->ops && clk->ops->set_rate)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		ret = clk->ops->set_rate(clk, rate, algo_id);
		if (ret == 0) {
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			propagate_rate(clk);
		}
		spin_unlock_irqrestore(&clock_lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (!parent || !clk)
		return ret;
	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clock_lock, flags);
	if (clk->usecount == 0) {
		if (clk->ops && clk->ops->set_parent)
			ret = clk->ops->set_parent(clk, parent);
		else
			ret = clk_reparent(clk, parent);

		if (ret == 0) {
			pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
				 clk->name, clk->parent->name, clk->rate);
			if (clk->ops && clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);
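
/*
 * Typical caller pattern (sketch): ask for the nearest supported rate
 * first, then commit to it, so the set_rate op is never handed a value
 * its dividers cannot hit. The 48MHz target is made up:
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *	if (rounded > 0)
 *		clk_set_rate(clk, rounded);
 */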

/*
 * Find the correct struct clk for the device and connection ID.
 * We do slightly fuzzy matching here:
 *  An entry with a NULL ID is assumed to be a wildcard.
 *  If an entry has a device ID, it must match.
 *  If an entry has a connection ID, it must match.
 * Then we take the most specific entry - with the following
 * order of precedence: dev+con > dev only > con only.
 */
static struct clk *clk_find(const char *dev_id, const char *con_id)
{
	struct clk_lookup *p;
	struct clk *clk = NULL;
	int match, best = 0;

	list_for_each_entry(p, &clock_list, node) {
		match = 0;
		if (p->dev_id) {
			if (!dev_id || strcmp(p->dev_id, dev_id))
				continue;
			match += 2;
		}
		if (p->con_id) {
			if (!con_id || strcmp(p->con_id, con_id))
				continue;
			match += 1;
		}
		if (match == 0)
			continue;

		if (match > best) {
			clk = p->clk;
			best = match;
		}
	}
	return clk;
}
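
/*
 * Worked example of the precedence rules (hypothetical entries): given
 *
 *	{ .dev_id = "sh-sci.0", .con_id = "sci_fck" }	-> match = 3
 *	{ .dev_id = "sh-sci.0", .con_id = NULL      }	-> match = 2
 *	{ .dev_id = NULL,       .con_id = "sci_fck" }	-> match = 1
 *
 * clk_find("sh-sci.0", "sci_fck") matches all three, but the dev+con
 * entry scores highest and wins; on a tie, the first entry found is kept.
 */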

struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
	struct clk *clk;

	mutex_lock(&clock_list_sem);
	clk = clk_find(dev_id, con_id);
	mutex_unlock(&clock_list_sem);

	return clk ? clk : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(clk_get_sys);

/*
 * Returns a clock. Note that we first try to use device id on the bus
 * and clock name. If this fails, we try to use clock name only.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
	const char *dev_id = dev ? dev_name(dev) : NULL;
	struct clk *p, *clk = ERR_PTR(-ENOENT);
	int idno;

	clk = clk_get_sys(dev_id, id);
	if (clk && !IS_ERR(clk))
		return clk;

	if (dev == NULL || dev->bus != &platform_bus_type)
		idno = -1;
	else
		idno = to_platform_device(dev)->id;

	mutex_lock(&clock_list_sem);
	list_for_each_entry(p, &clock_list, node) {
		if (p->id == idno &&
		    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			goto found;
		}
	}

	list_for_each_entry(p, &clock_list, node) {
		if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			break;
		}
	}

found:
	mutex_unlock(&clock_list_sem);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_get);

void clk_put(struct clk *clk)
{
	if (clk && !IS_ERR(clk))
		module_put(clk->owner);
}
EXPORT_SYMBOL_GPL(clk_put);
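
/*
 * Typical consumer usage (sketch; the device and clock names are made
 * up). Note that clk_get() returns an ERR_PTR() value, not NULL, on
 * failure:
 *
 *	struct clk *clk = clk_get(&pdev->dev, "peripheral_clk");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	clk_enable(clk);
 *	// ... use the device ...
 *	clk_disable(clk);
 *	clk_put(clk);
 */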

void __init __attribute__ ((weak))
arch_init_clk_ops(struct clk_ops **ops, int type)
{
}

int __init __attribute__ ((weak))
arch_clk_init(void)
{
	return 0;
}

static int show_clocks(char *buf, char **start, off_t off,
		       int len, int *eof, void *data)
{
	struct clk *clk;
	char *p = buf;

	list_for_each_entry_reverse(clk, &clock_list, node) {
		unsigned long rate = clk_get_rate(clk);

		p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name,
			     rate / 1000000, (rate % 1000000) / 10000,
			     (clk->usecount > 0) ? "enabled" : "disabled");
	}

	return p - buf;
}
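
/*
 * Illustrative /proc/clocks output, derived from the sprintf format
 * above (the rates shown are made up):
 *
 *	master_clk  	: 33.33MHz	enabled
 *	module_clk  	: 16.66MHz	enabled
 *	bus_clk     	: 66.66MHz	enabled
 *	cpu_clk     	: 266.66MHz	enabled
 */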

#ifdef CONFIG_PM
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp,
						rate, NO_CHANGE);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
#endif

int __init clk_init(void)
{
	int i, ret = 0;

	BUG_ON(!master_clk.rate);

	for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
		struct clk *clk = onchip_clocks[i];

		arch_init_clk_ops(&clk->ops, i);
		ret |= clk_register(clk);
	}

	ret |= arch_clk_init();

	/* Kick the child clocks.. */
	recalculate_root_clocks();

	/* Enable the necessary init clocks */
	clk_enable_init_clocks();

	return ret;
}

static int __init clk_proc_init(void)
{
	struct proc_dir_entry *p;
	p = create_proc_read_entry("clocks", S_IRUSR, NULL,
				   show_clocks, NULL);
	if (unlikely(!p))
		return -EINVAL;

	return 0;
}
subsys_initcall(clk_proc_init);
569