/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 * Copyright (C) 2005 - 2009 Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <asm/clock.h>
#include <asm/timer.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/*
 * Each subtype is expected to define the init routines for these clocks,
 * as each subtype (or processor family) will have these clocks at the
 * very least. These are all provided through the CPG, which even some of
 * the more quirky parts (such as ST40, SH4-202, etc.) still have.
 *
 * The processor-specific code is expected to register any additional
 * clock sources that are of interest.
 */
static struct clk master_clk = {
        .name   = "master_clk",
        .flags  = CLK_ENABLE_ON_INIT,
        .rate   = CONFIG_SH_PCLK_FREQ,
};

static struct clk module_clk = {
        .name   = "module_clk",
        .parent = &master_clk,
        .flags  = CLK_ENABLE_ON_INIT,
};

static struct clk bus_clk = {
        .name   = "bus_clk",
        .parent = &master_clk,
        .flags  = CLK_ENABLE_ON_INIT,
};

static struct clk cpu_clk = {
        .name   = "cpu_clk",
        .parent = &master_clk,
        .flags  = CLK_ENABLE_ON_INIT,
};
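/*
 * The registration below yields the following CPG clock tree (a sketch
 * for orientation only; additional leaf clocks vary per subtype):
 *
 *	master_clk
 *	|-- module_clk  (peripheral/module clock)
 *	|-- bus_clk     (bus clock)
 *	`-- cpu_clk     (CPU core clock)
 *
 * The three child clocks derive their rates from master_clk via the
 * subtype-specific ops handed out by arch_init_clk_ops() at clk_init()
 * time.
 */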
/*
 * The ordering of these clocks matters; arch_init_clk_ops() selects the
 * ops for each entry by its array index at clk_init() time. Do not
 * change it.
 */
static struct clk *onchip_clocks[] = {
        &master_clk,
        &module_clk,
        &bus_clk,
        &cpu_clk,
};

/* Used for clocks that always have the same rate as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
        return clk->parent->rate;
}

/* Propagate a rate change down to all children */
void propagate_rate(struct clk *tclk)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &tclk->children, sibling) {
                if (clkp->ops && clkp->ops->recalc)
                        clkp->rate = clkp->ops->recalc(clkp);
                propagate_rate(clkp);
        }
}

static int __clk_enable(struct clk *clk)
{
        if (clk->usecount++ == 0) {
                if (clk->parent)
                        __clk_enable(clk->parent);

                if (clk->ops && clk->ops->enable)
                        clk->ops->enable(clk);
        }

        return 0;
}

int clk_enable(struct clk *clk)
{
        unsigned long flags;
        int ret;

        if (!clk)
                return -EINVAL;

        spin_lock_irqsave(&clock_lock, flags);
        ret = __clk_enable(clk);
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

static void __clk_disable(struct clk *clk)
{
        if (clk->usecount > 0 && !(--clk->usecount)) {
                if (likely(clk->ops && clk->ops->disable))
                        clk->ops->disable(clk);
                if (likely(clk->parent))
                        __clk_disable(clk->parent);
        }
}

void clk_disable(struct clk *clk)
{
        unsigned long flags;

        if (!clk)
                return;

        spin_lock_irqsave(&clock_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
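/*
 * Typical consumer usage of the refcounted enable/disable above (an
 * illustrative sketch; "my_clk" and "pdev" are hypothetical, not
 * defined by this file):
 *
 *	struct clk *clk = clk_get(&pdev->dev, "my_clk");
 *	if (!IS_ERR(clk)) {
 *		clk_enable(clk);	// usecount 0 -> 1, parents enabled first
 *		...
 *		clk_disable(clk);	// usecount 1 -> 0, parents released
 *		clk_put(clk);
 *	}
 *
 * Enables recurse up to the root before the clock's own enable hook
 * runs; disables release the parents only when the last user goes away.
 */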
static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent) and, provided
 * each clock's .recalc hook is set correctly, propagates the new rates
 * down to the children. Called at init.
 */
void recalculate_root_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &root_clks, sibling) {
                if (clkp->ops && clkp->ops->recalc)
                        clkp->rate = clkp->ops->recalc(clkp);
                propagate_rate(clkp);
        }
}

int clk_register(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        /*
         * trap out already registered clocks
         */
        if (clk->node.next || clk->node.prev)
                return 0;

        mutex_lock(&clock_list_sem);

        INIT_LIST_HEAD(&clk->children);
        clk->usecount = 0;

        if (clk->parent)
                list_add(&clk->sibling, &clk->parent->children);
        else
                list_add(&clk->sibling, &root_clks);

        list_add(&clk->node, &clock_list);
        if (clk->ops && clk->ops->init)
                clk->ops->init(clk);
        mutex_unlock(&clock_list_sem);

        return 0;
}
EXPORT_SYMBOL_GPL(clk_register);

void clk_unregister(struct clk *clk)
{
        mutex_lock(&clock_list_sem);
        list_del(&clk->sibling);
        list_del(&clk->node);
        mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

static void clk_enable_init_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &clock_list, node)
                if (clkp->flags & CLK_ENABLE_ON_INIT)
                        clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
        return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
        return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
        int ret = -EOPNOTSUPP;

        if (likely(clk->ops && clk->ops->set_rate)) {
                unsigned long flags;

                spin_lock_irqsave(&clock_lock, flags);
                ret = clk->ops->set_rate(clk, rate, algo_id);
                if (ret == 0) {
                        if (clk->ops->recalc)
                                clk->rate = clk->ops->recalc(clk);
                        propagate_rate(clk);
                }
                spin_unlock_irqrestore(&clock_lock, flags);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);

void clk_recalc_rate(struct clk *clk)
{
        unsigned long flags;

        if (!clk->ops || !clk->ops->recalc)
                return;

        spin_lock_irqsave(&clock_lock, flags);
        clk->rate = clk->ops->recalc(clk);
        propagate_rate(clk);
        spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_recalc_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        unsigned long flags;
        int ret = -EINVAL;

        if (!parent || !clk)
                return ret;

        spin_lock_irqsave(&clock_lock, flags);
        if (clk->usecount == 0) {
                if (clk->ops && clk->ops->set_parent)
                        ret = clk->ops->set_parent(clk, parent);
                if (ret == 0) {
                        if (clk->ops->recalc)
                                clk->rate = clk->ops->recalc(clk);
                        propagate_rate(clk);
                }
        } else
                ret = -EBUSY;
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
        return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
        if (likely(clk->ops && clk->ops->round_rate)) {
                unsigned long flags, rounded;

                spin_lock_irqsave(&clock_lock, flags);
                rounded = clk->ops->round_rate(clk, rate);
                spin_unlock_irqrestore(&clock_lock, flags);

                return rounded;
        }

        return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);
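/*
 * Rate changes propagate downward. As an illustrative example (the
 * divider value is hypothetical, not taken from any real subtype): if
 * master_clk runs at 33333333 Hz and bus_clk's .recalc divides the
 * parent rate by 4, a successful clk_set_rate(&master_clk, ...)
 * recalculates master_clk and then propagate_rate() walks its children,
 * so bus_clk ends up at parent_rate / 4 without any explicit call on it.
 */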
/*
 * Returns a matching clock. We first try to match on both the platform
 * device id and the clock name; if that fails, we fall back to matching
 * on the clock name alone.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
        struct clk *p, *clk = ERR_PTR(-ENOENT);
        int idno;

        if (dev == NULL || dev->bus != &platform_bus_type)
                idno = -1;
        else
                idno = to_platform_device(dev)->id;

        mutex_lock(&clock_list_sem);
        list_for_each_entry(p, &clock_list, node) {
                if (p->id == idno &&
                    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
                        clk = p;
                        goto found;
                }
        }

        list_for_each_entry(p, &clock_list, node) {
                if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
                        clk = p;
                        break;
                }
        }

found:
        mutex_unlock(&clock_list_sem);

        return clk;
}
EXPORT_SYMBOL_GPL(clk_get);

void clk_put(struct clk *clk)
{
        if (clk && !IS_ERR(clk))
                module_put(clk->owner);
}
EXPORT_SYMBOL_GPL(clk_put);

void __init __attribute__ ((weak))
arch_init_clk_ops(struct clk_ops **ops, int type)
{
}

int __init __attribute__ ((weak))
arch_clk_init(void)
{
        return 0;
}

static int show_clocks(char *buf, char **start, off_t off,
                       int len, int *eof, void *data)
{
        struct clk *clk;
        char *p = buf;

        list_for_each_entry_reverse(clk, &clock_list, node) {
                unsigned long rate = clk_get_rate(clk);

                p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name,
                             rate / 1000000, (rate % 1000000) / 10000,
                             (clk->usecount > 0) ? "enabled" : "disabled");
        }

        return p - buf;
}

#ifdef CONFIG_PM
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
        static pm_message_t prev_state;
        struct clk *clkp;

        switch (state.event) {
        case PM_EVENT_ON:
                /* Resuming from hibernation */
                if (prev_state.event != PM_EVENT_FREEZE)
                        break;

                list_for_each_entry(clkp, &clock_list, node) {
                        if (likely(clkp->ops)) {
                                unsigned long rate = clkp->rate;

                                if (likely(clkp->ops->set_parent))
                                        clkp->ops->set_parent(clkp,
                                                clkp->parent);
                                if (likely(clkp->ops->set_rate))
                                        clkp->ops->set_rate(clkp,
                                                rate, NO_CHANGE);
                                else if (likely(clkp->ops->recalc))
                                        clkp->rate = clkp->ops->recalc(clkp);
                        }
                }
                break;
        case PM_EVENT_FREEZE:
                break;
        case PM_EVENT_SUSPEND:
                break;
        }

        prev_state = state;
        return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
        return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
        .name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
        .suspend = clks_sysdev_suspend,
        .resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
        .cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
        sysdev_class_register(&clks_sysdev_class);
        sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
        sysdev_register(&clks_sysdev_dev);

        return 0;
}
subsys_initcall(clk_sysdev_init);
#endif
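/*
 * Boot-time bring-up order, as implemented by clk_init() below:
 *
 *  1. arch_init_clk_ops() hands each CPG clock its subtype-specific ops.
 *  2. clk_register() places the four on-chip clocks in the global list.
 *  3. arch_clk_init() registers any additional processor clocks.
 *  4. recalculate_root_clocks() computes rates from the roots down.
 *  5. clk_enable_init_clocks() enables everything marked
 *     CLK_ENABLE_ON_INIT.
 */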
int __init clk_init(void)
{
        int i, ret = 0;

        BUG_ON(!master_clk.rate);

        for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
                struct clk *clk = onchip_clocks[i];

                arch_init_clk_ops(&clk->ops, i);
                ret |= clk_register(clk);
        }

        ret |= arch_clk_init();

        /* Kick the child clocks.. */
        recalculate_root_clocks();

        /* Enable the necessary init clocks */
        clk_enable_init_clocks();

        return ret;
}

static int __init clk_proc_init(void)
{
        struct proc_dir_entry *p;
        p = create_proc_read_entry("clocks", S_IRUSR, NULL,
                                   show_clocks, NULL);
        if (unlikely(!p))
                return -EINVAL;

        return 0;
}
subsys_initcall(clk_proc_init);
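/*
 * Example /proc/clocks output (the figures are illustrative only; real
 * values depend on CONFIG_SH_PCLK_FREQ and the subtype's dividers):
 *
 *	master_clk  	: 33.33MHz	enabled
 *	module_clk  	: 33.33MHz	enabled
 *	bus_clk     	: 66.66MHz	enabled
 *	cpu_clk     	: 266.66MHz	enabled
 *
 * show_clocks() walks the list in reverse so that clocks appear in
 * registration order, since clk_register() adds to the list head.
 */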