/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 *  Copyright (C) 2005 - 2009  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <asm/clock.h>
#include <asm/timer.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/*
 * Each subtype is expected to define the init routines for these clocks,
 * as each subtype (or processor family) will have these clocks at the
 * very least. These are all provided through the CPG, which even some of
 * the more quirky parts (such as ST40, SH4-202, etc.) still have.
 *
 * The processor-specific code is expected to register any additional
 * clock sources that are of interest.
 */
static struct clk master_clk = {
	.name		= "master_clk",
	.flags		= CLK_ALWAYS_ENABLED,
	.rate		= CONFIG_SH_PCLK_FREQ,
};

static struct clk module_clk = {
	.name		= "module_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED,
};

static struct clk bus_clk = {
	.name		= "bus_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED,
};

static struct clk cpu_clk = {
	.name		= "cpu_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED,
};

/*
 * The ordering of these clocks matters; do not change it.
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&module_clk,
	&bus_clk,
	&cpu_clk,
};

/* Used for clocks that always have the same value as their parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent->rate;
}
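
/*
 * Example (illustrative only, not part of this file): a peripheral
 * clock that simply tracks its parent can point its recalc hook at
 * followparent_recalc(). The ops structure and clock below are
 * hypothetical.
 */
#if 0
static struct clk_ops follower_clk_ops = {
	.recalc		= followparent_recalc,
};

static struct clk example_uart_clk = {
	.name		= "uart_clk",		/* hypothetical clock */
	.parent		= &module_clk,
	.ops		= &follower_clk_ops,
};
#endif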

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}

static void __clk_init(struct clk *clk)
{
	/*
	 * See if this is the first time we're enabling the clock; some
	 * clocks that are always enabled still require "special"
	 * initialization. This is especially true if the clock mode
	 * changes and the clock needs to hunt for the proper set of
	 * divisors to use before it can effectively recalc.
	 */

	if (clk->flags & CLK_NEEDS_INIT) {
		if (clk->ops && clk->ops->init)
			clk->ops->init(clk);

		clk->flags &= ~CLK_NEEDS_INIT;
	}
}

static int __clk_enable(struct clk *clk)
{
	if (!clk)
		return -EINVAL;

	clk->usecount++;

	/* nothing to do if always enabled */
	if (clk->flags & CLK_ALWAYS_ENABLED)
		return 0;

	if (clk->usecount == 1) {
		__clk_init(clk);

		__clk_enable(clk->parent);

		if (clk->ops && clk->ops->enable)
			clk->ops->enable(clk);
	}

	return 0;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	clk->usecount--;

	WARN_ON(clk->usecount < 0);

	if (clk->flags & CLK_ALWAYS_ENABLED)
		return;

	if (clk->usecount == 0) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);

		__clk_disable(clk->parent);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
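
/*
 * Usage sketch, not part of this file: a typical consumer brackets
 * device activity with clk_enable()/clk_disable(). Enables nest via
 * usecount, so only the 0 -> 1 and 1 -> 0 transitions touch the
 * hardware. The probe function and clock name are hypothetical.
 */
#if 0
static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = clk_get(&pdev->dev, "module_clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);		/* usecount 0 -> 1, ops->enable() runs */
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* ... use the device ... */

	clk_disable(clk);		/* usecount 1 -> 0, ops->disable() runs */
	clk_put(clk);
	return 0;
}
#endif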

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent) and, provided
 * each clock's .recalc is set correctly, propagates the new rates down
 * through their children. Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}

int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clock_list_sem);

	INIT_LIST_HEAD(&clk->children);

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clock_list);
	clk->usecount = 0;
	clk->flags |= CLK_NEEDS_INIT;

	mutex_unlock(&clock_list_sem);

	if (clk->flags & CLK_ALWAYS_ENABLED) {
		__clk_init(clk);
		pr_debug("Clock '%s' is ALWAYS_ENABLED\n", clk->name);
		if (clk->ops && clk->ops->enable)
			clk->ops->enable(clk);
		pr_debug("Enabled.\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clk_register);
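
/*
 * Registration sketch, hypothetical: processor-specific code fills in
 * a clk with a parent and ops and hands it to clk_register(), which
 * links it into both the flat clock_list and the parent/child tree
 * used by propagate_rate().
 */
#if 0
static struct clk example_sdhi_clk = {
	.name		= "sdhi_clk",		/* hypothetical clock */
	.parent		= &bus_clk,
	.ops		= &example_clk_ops,	/* hypothetical ops */
};

static int __init example_board_clk_init(void)
{
	return clk_register(&example_sdhi_clk);
}
#endif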

void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
	int ret = -EOPNOTSUPP;

	if (likely(clk->ops && clk->ops->set_rate)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		ret = clk->ops->set_rate(clk, rate, algo_id);
		if (ret == 0) {
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			propagate_rate(clk);
		}
		spin_unlock_irqrestore(&clock_lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);
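
/*
 * Rate-change sketch: clk_set_rate() is the common entry point, while
 * clk_set_rate_ex() additionally passes an algorithm hint through to
 * the backend's set_rate() op. Only NO_CHANGE appears in this file;
 * other algo_id values are SoC-specific. The target rate and the clk
 * variable in this fragment are illustrative.
 */
#if 0
	/* Plain rate change; the algo_id passed down is 0. */
	clk_set_rate(clk, 33333333);

	/* Reprogram divisors for the current rate without changing it. */
	clk_set_rate_ex(clk, clk_get_rate(clk), NO_CHANGE);
#endif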

void clk_recalc_rate(struct clk *clk)
{
	unsigned long flags;

	if (!clk->ops->recalc)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	clk->rate = clk->ops->recalc(clk);
	propagate_rate(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_recalc_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (!parent || !clk)
		return ret;

	spin_lock_irqsave(&clock_lock, flags);
	if (clk->usecount == 0) {
		if (clk->ops->set_parent)
			ret = clk->ops->set_parent(clk, parent);
		if (ret == 0) {
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
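
/*
 * Reparenting sketch: clk_set_parent() returns -EBUSY while the clock
 * is in use, so callers drop their enable count first. The parent
 * choice in this fragment is illustrative.
 */
#if 0
	clk_disable(clk);			/* usecount must reach 0 */
	if (clk_set_parent(clk, &bus_clk) == 0)
		clk_enable(clk);		/* re-enable on the new parent */
#endif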

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);
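
/*
 * Round-then-set idiom (illustrative rate): ask what the backend can
 * actually achieve before committing. Clocks without a round_rate op
 * simply report their current rate.
 */
#if 0
	long rounded = clk_round_rate(clk, 48000000);

	if (rounded > 0)
		clk_set_rate(clk, rounded);
#endif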

/*
 * Returns a clock. Note that we first try to match on both the device
 * id on the bus and the clock name. If this fails, we fall back to
 * matching on the clock name alone.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
	struct clk *p, *clk = ERR_PTR(-ENOENT);
	int idno;

	if (dev == NULL || dev->bus != &platform_bus_type)
		idno = -1;
	else
		idno = to_platform_device(dev)->id;

	mutex_lock(&clock_list_sem);
	list_for_each_entry(p, &clock_list, node) {
		if (p->id == idno &&
		    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			goto found;
		}
	}

	list_for_each_entry(p, &clock_list, node) {
		if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			break;
		}
	}

found:
	mutex_unlock(&clock_list_sem);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_get);

void clk_put(struct clk *clk)
{
	if (clk && !IS_ERR(clk))
		module_put(clk->owner);
}
EXPORT_SYMBOL_GPL(clk_put);
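
/*
 * Lookup sketch: for a platform device, clk_get() matches on device id
 * plus name before falling back to the name alone; with a NULL dev the
 * id is -1 and the name match usually decides. This fragment is
 * illustrative only.
 */
#if 0
	struct clk *clk = clk_get(NULL, "cpu_clk");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	pr_info("CPU clock: %lu Hz\n", clk_get_rate(clk));
	clk_put(clk);
#endif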

void __init __attribute__ ((weak))
arch_init_clk_ops(struct clk_ops **ops, int type)
{
}

int __init __attribute__ ((weak))
arch_clk_init(void)
{
	return 0;
}
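
/*
 * Subtype hook sketch: a CPU family overrides the weak stubs above to
 * attach ops to the four CPG clocks (the 'type' argument is the index
 * into onchip_clocks[]) and to register family-specific clocks. The
 * ops structures named here are hypothetical.
 */
#if 0
void __init arch_init_clk_ops(struct clk_ops **ops, int type)
{
	static struct clk_ops *example_cpg_ops[] = {
		&master_clk_ops,	/* all hypothetical ops */
		&module_clk_ops,
		&bus_clk_ops,
		&cpu_clk_ops,
	};

	if (type >= 0 && type < ARRAY_SIZE(example_cpg_ops))
		*ops = example_cpg_ops[type];
}
#endif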

static int show_clocks(char *buf, char **start, off_t off,
		       int len, int *eof, void *data)
{
	struct clk *clk;
	char *p = buf;

	list_for_each_entry_reverse(clk, &clock_list, node) {
		unsigned long rate = clk_get_rate(clk);

		p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name,
			     rate / 1000000, (rate % 1000000) / 10000,
			     ((clk->flags & CLK_ALWAYS_ENABLED) ||
			      clk->usecount > 0) ?
			     "enabled" : "disabled");
	}

	return p - buf;
}
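
/*
 * Illustrative /proc/clocks output under the format string above; the
 * list is walked in reverse registration order, and actual names and
 * rates depend on the part:
 *
 *	cpu_clk     	: 266.66MHz	enabled
 *	bus_clk     	: 66.66MHz	enabled
 *	module_clk  	: 33.33MHz	enabled
 *	master_clk  	: 33.33MHz	enabled
 */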

#ifdef CONFIG_PM
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp,
						rate, NO_CHANGE);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
#endif

int __init clk_init(void)
{
	int i, ret = 0;

	BUG_ON(!master_clk.rate);

	for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
		struct clk *clk = onchip_clocks[i];

		arch_init_clk_ops(&clk->ops, i);
		ret |= clk_register(clk);
	}

	ret |= arch_clk_init();

	/* Kick the child clocks. */
	recalculate_root_clocks();

	return ret;
}

static int __init clk_proc_init(void)
{
	struct proc_dir_entry *p;

	p = create_proc_read_entry("clocks", S_IRUSR, NULL,
				   show_clocks, NULL);
	if (unlikely(!p))
		return -EINVAL;

	return 0;
}
subsys_initcall(clk_proc_init);
513