1 /* 2 * arch/sh/kernel/cpu/clock.c - SuperH clock framework 3 * 4 * Copyright (C) 2005, 2006, 2007 Paul Mundt 5 * 6 * This clock framework is derived from the OMAP version by: 7 * 8 * Copyright (C) 2004 - 2005 Nokia Corporation 9 * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> 10 * 11 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com> 12 * 13 * This file is subject to the terms and conditions of the GNU General Public 14 * License. See the file "COPYING" in the main directory of this archive 15 * for more details. 16 */ 17 #include <linux/kernel.h> 18 #include <linux/init.h> 19 #include <linux/module.h> 20 #include <linux/mutex.h> 21 #include <linux/list.h> 22 #include <linux/kobject.h> 23 #include <linux/sysdev.h> 24 #include <linux/seq_file.h> 25 #include <linux/err.h> 26 #include <linux/platform_device.h> 27 #include <linux/proc_fs.h> 28 #include <asm/clock.h> 29 #include <asm/timer.h> 30 31 static LIST_HEAD(clock_list); 32 static DEFINE_SPINLOCK(clock_lock); 33 static DEFINE_MUTEX(clock_list_sem); 34 35 /* 36 * Each subtype is expected to define the init routines for these clocks, 37 * as each subtype (or processor family) will have these clocks at the 38 * very least. These are all provided through the CPG, which even some of 39 * the more quirky parts (such as ST40, SH4-202, etc.) still have. 40 * 41 * The processor-specific code is expected to register any additional 42 * clock sources that are of interest. 43 */ 44 static struct clk master_clk = { 45 .name = "master_clk", 46 .flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES, 47 .rate = CONFIG_SH_PCLK_FREQ, 48 }; 49 50 static struct clk module_clk = { 51 .name = "module_clk", 52 .parent = &master_clk, 53 .flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES, 54 }; 55 56 static struct clk bus_clk = { 57 .name = "bus_clk", 58 .parent = &master_clk, 59 .flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES, 60 }; 61 62 static struct clk cpu_clk = { 63 .name = "cpu_clk", 64 .parent = &master_clk, 65 .flags = CLK_ALWAYS_ENABLED, 66 }; 67 68 /* 69 * The ordering of these clocks matters, do not change it. 70 */ 71 static struct clk *onchip_clocks[] = { 72 &master_clk, 73 &module_clk, 74 &bus_clk, 75 &cpu_clk, 76 }; 77 78 /* Propagate rate to children */ 79 static void propagate_rate(struct clk *clk) 80 { 81 struct clk *clkp; 82 83 list_for_each_entry(clkp, &clock_list, node) { 84 if (likely(clkp->parent != clk)) 85 continue; 86 if (likely(clkp->ops && clkp->ops->recalc)) 87 clkp->rate = clkp->ops->recalc(clkp); 88 if (unlikely(clkp->flags & CLK_RATE_PROPAGATES)) 89 propagate_rate(clkp); 90 } 91 } 92 93 static void __clk_init(struct clk *clk) 94 { 95 /* 96 * See if this is the first time we're enabling the clock, some 97 * clocks that are always enabled still require "special" 98 * initialization. This is especially true if the clock mode 99 * changes and the clock needs to hunt for the proper set of 100 * divisors to use before it can effectively recalc. 101 */ 102 103 if (clk->flags & CLK_NEEDS_INIT) { 104 if (clk->ops && clk->ops->init) 105 clk->ops->init(clk); 106 107 clk->flags &= ~CLK_NEEDS_INIT; 108 } 109 } 110 111 static int __clk_enable(struct clk *clk) 112 { 113 if (!clk) 114 return -EINVAL; 115 116 clk->usecount++; 117 118 /* nothing to do if always enabled */ 119 if (clk->flags & CLK_ALWAYS_ENABLED) 120 return 0; 121 122 if (clk->usecount == 1) { 123 __clk_init(clk); 124 125 __clk_enable(clk->parent); 126 127 if (clk->ops && clk->ops->enable) 128 clk->ops->enable(clk); 129 } 130 131 return 0; 132 } 133 134 int clk_enable(struct clk *clk) 135 { 136 unsigned long flags; 137 int ret; 138 139 spin_lock_irqsave(&clock_lock, flags); 140 ret = __clk_enable(clk); 141 spin_unlock_irqrestore(&clock_lock, flags); 142 143 return ret; 144 } 145 EXPORT_SYMBOL_GPL(clk_enable); 146 147 static void __clk_disable(struct clk *clk) 148 { 149 if (!clk) 150 return; 151 152 clk->usecount--; 153 154 WARN_ON(clk->usecount < 0); 155 156 if (clk->flags & CLK_ALWAYS_ENABLED) 157 return; 158 159 if (clk->usecount == 0) { 160 if (likely(clk->ops && clk->ops->disable)) 161 clk->ops->disable(clk); 162 163 __clk_disable(clk->parent); 164 } 165 } 166 167 void clk_disable(struct clk *clk) 168 { 169 unsigned long flags; 170 171 spin_lock_irqsave(&clock_lock, flags); 172 __clk_disable(clk); 173 spin_unlock_irqrestore(&clock_lock, flags); 174 } 175 EXPORT_SYMBOL_GPL(clk_disable); 176 177 int clk_register(struct clk *clk) 178 { 179 mutex_lock(&clock_list_sem); 180 181 list_add(&clk->node, &clock_list); 182 clk->usecount = 0; 183 clk->flags |= CLK_NEEDS_INIT; 184 185 mutex_unlock(&clock_list_sem); 186 187 if (clk->flags & CLK_ALWAYS_ENABLED) { 188 __clk_init(clk); 189 pr_debug( "Clock '%s' is ALWAYS_ENABLED\n", clk->name); 190 if (clk->ops && clk->ops->enable) 191 clk->ops->enable(clk); 192 pr_debug( "Enabled."); 193 } 194 195 return 0; 196 } 197 EXPORT_SYMBOL_GPL(clk_register); 198 199 void clk_unregister(struct clk *clk) 200 { 201 mutex_lock(&clock_list_sem); 202 list_del(&clk->node); 203 mutex_unlock(&clock_list_sem); 204 } 205 EXPORT_SYMBOL_GPL(clk_unregister); 206 207 unsigned long clk_get_rate(struct clk *clk) 208 { 209 return clk->rate; 210 } 211 EXPORT_SYMBOL_GPL(clk_get_rate); 212 213 int clk_set_rate(struct clk *clk, unsigned long rate) 214 { 215 return clk_set_rate_ex(clk, rate, 0); 216 } 217 EXPORT_SYMBOL_GPL(clk_set_rate); 218 219 int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id) 220 { 221 int ret = -EOPNOTSUPP; 222 223 if (likely(clk->ops && clk->ops->set_rate)) { 224 unsigned long flags; 225 226 spin_lock_irqsave(&clock_lock, flags); 227 ret = clk->ops->set_rate(clk, rate, algo_id); 228 spin_unlock_irqrestore(&clock_lock, flags); 229 } 230 231 if (unlikely(clk->flags & CLK_RATE_PROPAGATES)) 232 propagate_rate(clk); 233 234 return ret; 235 } 236 EXPORT_SYMBOL_GPL(clk_set_rate_ex); 237 238 void clk_recalc_rate(struct clk *clk) 239 { 240 if (likely(clk->ops && clk->ops->recalc)) { 241 unsigned long flags; 242 243 spin_lock_irqsave(&clock_lock, flags); 244 clk->rate = clk->ops->recalc(clk); 245 spin_unlock_irqrestore(&clock_lock, flags); 246 } 247 248 if (unlikely(clk->flags & CLK_RATE_PROPAGATES)) 249 propagate_rate(clk); 250 } 251 EXPORT_SYMBOL_GPL(clk_recalc_rate); 252 253 int clk_set_parent(struct clk *clk, struct clk *parent) 254 { 255 int ret = -EINVAL; 256 struct clk *old; 257 258 if (!parent || !clk) 259 return ret; 260 261 old = clk->parent; 262 if (likely(clk->ops && clk->ops->set_parent)) { 263 unsigned long flags; 264 spin_lock_irqsave(&clock_lock, flags); 265 ret = clk->ops->set_parent(clk, parent); 266 spin_unlock_irqrestore(&clock_lock, flags); 267 clk->parent = (ret ? old : parent); 268 } 269 270 if (unlikely(clk->flags & CLK_RATE_PROPAGATES)) 271 propagate_rate(clk); 272 return ret; 273 } 274 EXPORT_SYMBOL_GPL(clk_set_parent); 275 276 struct clk *clk_get_parent(struct clk *clk) 277 { 278 return clk->parent; 279 } 280 EXPORT_SYMBOL_GPL(clk_get_parent); 281 282 long clk_round_rate(struct clk *clk, unsigned long rate) 283 { 284 if (likely(clk->ops && clk->ops->round_rate)) { 285 unsigned long flags, rounded; 286 287 spin_lock_irqsave(&clock_lock, flags); 288 rounded = clk->ops->round_rate(clk, rate); 289 spin_unlock_irqrestore(&clock_lock, flags); 290 291 return rounded; 292 } 293 294 return clk_get_rate(clk); 295 } 296 EXPORT_SYMBOL_GPL(clk_round_rate); 297 298 /* 299 * Returns a clock. Note that we first try to use device id on the bus 300 * and clock name. If this fails, we try to use clock name only. 301 */ 302 struct clk *clk_get(struct device *dev, const char *id) 303 { 304 struct clk *p, *clk = ERR_PTR(-ENOENT); 305 int idno; 306 307 if (dev == NULL || dev->bus != &platform_bus_type) 308 idno = -1; 309 else 310 idno = to_platform_device(dev)->id; 311 312 mutex_lock(&clock_list_sem); 313 list_for_each_entry(p, &clock_list, node) { 314 if (p->id == idno && 315 strcmp(id, p->name) == 0 && try_module_get(p->owner)) { 316 clk = p; 317 goto found; 318 } 319 } 320 321 list_for_each_entry(p, &clock_list, node) { 322 if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) { 323 clk = p; 324 break; 325 } 326 } 327 328 found: 329 mutex_unlock(&clock_list_sem); 330 331 return clk; 332 } 333 EXPORT_SYMBOL_GPL(clk_get); 334 335 void clk_put(struct clk *clk) 336 { 337 if (clk && !IS_ERR(clk)) 338 module_put(clk->owner); 339 } 340 EXPORT_SYMBOL_GPL(clk_put); 341 342 void __init __attribute__ ((weak)) 343 arch_init_clk_ops(struct clk_ops **ops, int type) 344 { 345 } 346 347 int __init __attribute__ ((weak)) 348 arch_clk_init(void) 349 { 350 return 0; 351 } 352 353 static int show_clocks(char *buf, char **start, off_t off, 354 int len, int *eof, void *data) 355 { 356 struct clk *clk; 357 char *p = buf; 358 359 list_for_each_entry_reverse(clk, &clock_list, node) { 360 unsigned long rate = clk_get_rate(clk); 361 362 p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name, 363 rate / 1000000, (rate % 1000000) / 10000, 364 ((clk->flags & CLK_ALWAYS_ENABLED) || 365 clk->usecount > 0) ? 366 "enabled" : "disabled"); 367 } 368 369 return p - buf; 370 } 371 372 #ifdef CONFIG_PM 373 static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state) 374 { 375 static pm_message_t prev_state; 376 struct clk *clkp; 377 378 switch (state.event) { 379 case PM_EVENT_ON: 380 /* Resumeing from hibernation */ 381 if (prev_state.event != PM_EVENT_FREEZE) 382 break; 383 384 list_for_each_entry(clkp, &clock_list, node) { 385 if (likely(clkp->ops)) { 386 unsigned long rate = clkp->rate; 387 388 if (likely(clkp->ops->set_parent)) 389 clkp->ops->set_parent(clkp, 390 clkp->parent); 391 if (likely(clkp->ops->set_rate)) 392 clkp->ops->set_rate(clkp, 393 rate, NO_CHANGE); 394 else if (likely(clkp->ops->recalc)) 395 clkp->rate = clkp->ops->recalc(clkp); 396 } 397 } 398 break; 399 case PM_EVENT_FREEZE: 400 break; 401 case PM_EVENT_SUSPEND: 402 break; 403 } 404 405 prev_state = state; 406 return 0; 407 } 408 409 static int clks_sysdev_resume(struct sys_device *dev) 410 { 411 return clks_sysdev_suspend(dev, PMSG_ON); 412 } 413 414 static struct sysdev_class clks_sysdev_class = { 415 .name = "clks", 416 }; 417 418 static struct sysdev_driver clks_sysdev_driver = { 419 .suspend = clks_sysdev_suspend, 420 .resume = clks_sysdev_resume, 421 }; 422 423 static struct sys_device clks_sysdev_dev = { 424 .cls = &clks_sysdev_class, 425 }; 426 427 static int __init clk_sysdev_init(void) 428 { 429 sysdev_class_register(&clks_sysdev_class); 430 sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver); 431 sysdev_register(&clks_sysdev_dev); 432 433 return 0; 434 } 435 subsys_initcall(clk_sysdev_init); 436 #endif 437 438 int __init clk_init(void) 439 { 440 int i, ret = 0; 441 442 BUG_ON(!master_clk.rate); 443 444 for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) { 445 struct clk *clk = onchip_clocks[i]; 446 447 arch_init_clk_ops(&clk->ops, i); 448 ret |= clk_register(clk); 449 } 450 451 ret |= arch_clk_init(); 452 453 /* Kick the child clocks.. */ 454 propagate_rate(&master_clk); 455 propagate_rate(&bus_clk); 456 457 return ret; 458 } 459 460 static int __init clk_proc_init(void) 461 { 462 struct proc_dir_entry *p; 463 p = create_proc_read_entry("clocks", S_IRUSR, NULL, 464 show_clocks, NULL); 465 if (unlikely(!p)) 466 return -EINVAL; 467 468 return 0; 469 } 470 subsys_initcall(clk_proc_init); 471