/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 * Copyright (C) 2005, 2006, 2007 Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2005 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <asm/clock.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/*
 * Each subtype is expected to define the init routines for these clocks,
 * as each subtype (or processor family) will have these clocks at the
 * very least. These are all provided through the CPG, which even some of
 * the more quirky parts (such as ST40, SH4-202, etc.) still have.
 *
 * The processor-specific code is expected to register any additional
 * clock sources that are of interest.
 */
static struct clk master_clk = {
	.name		= "master_clk",
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
	.rate		= CONFIG_SH_PCLK_FREQ,
};

static struct clk module_clk = {
	.name		= "module_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
};

static struct clk bus_clk = {
	.name		= "bus_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
};

static struct clk cpu_clk = {
	.name		= "cpu_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED,
};

/*
 * The ordering of these clocks matters, do not change it.
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&module_clk,
	&bus_clk,
	&cpu_clk,
};

static void propagate_rate(struct clk *clk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node) {
		if (likely(clkp->parent != clk))
			continue;
		if (likely(clkp->ops && clkp->ops->recalc))
			clkp->ops->recalc(clkp);
		if (unlikely(clkp->flags & CLK_RATE_PROPAGATES))
			propagate_rate(clkp);
	}
}
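
/*
 * Illustrative note (not part of the original file): propagate_rate()
 * discovers children by scanning the flat clock_list rather than by
 * following per-clock child pointers. With the on-chip clocks defined
 * above,
 *
 *	master_clk (CLK_RATE_PROPAGATES)
 *	 +-- module_clk (CLK_RATE_PROPAGATES)
 *	 +-- bus_clk    (CLK_RATE_PROPAGATES)
 *	 +-- cpu_clk
 *
 * a rate change on master_clk calls ->recalc() on all three children,
 * then recurses into module_clk and bus_clk since they propagate too.
 */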

static void __clk_init(struct clk *clk)
{
	/*
	 * See if this is the first time we're enabling the clock, some
	 * clocks that are always enabled still require "special"
	 * initialization. This is especially true if the clock mode
	 * changes and the clock needs to hunt for the proper set of
	 * divisors to use before it can effectively recalc.
	 */

	if (clk->flags & CLK_NEEDS_INIT) {
		if (clk->ops && clk->ops->init)
			clk->ops->init(clk);

		clk->flags &= ~CLK_NEEDS_INIT;
	}
}

static int __clk_enable(struct clk *clk)
{
	if (!clk)
		return -EINVAL;

	clk->usecount++;

	/* nothing to do if always enabled */
	if (clk->flags & CLK_ALWAYS_ENABLED)
		return 0;

	if (clk->usecount == 1) {
		__clk_init(clk);

		__clk_enable(clk->parent);

		if (clk->ops && clk->ops->enable)
			clk->ops->enable(clk);
	}

	return 0;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	clk->usecount--;

	WARN_ON(clk->usecount < 0);

	if (clk->flags & CLK_ALWAYS_ENABLED)
		return;

	if (clk->usecount == 0) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);

		__clk_disable(clk->parent);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

int clk_register(struct clk *clk)
{
	mutex_lock(&clock_list_sem);

	list_add(&clk->node, &clock_list);
	clk->usecount = 0;
	clk->flags |= CLK_NEEDS_INIT;

	mutex_unlock(&clock_list_sem);

	/* Catch clocks that need to be 'always enabled' at registration */
	if (clk->flags & CLK_ALWAYS_ENABLED) {
		__clk_init(clk);
		pr_debug("Clock '%s' is ALWAYS_ENABLED\n", clk->name);
		if (clk->ops && clk->ops->enable)
			clk->ops->enable(clk);
		pr_debug("Enabled.\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clk_register);

void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->node);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
	int ret = -EOPNOTSUPP;

	if (likely(clk->ops && clk->ops->set_rate)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		ret = clk->ops->set_rate(clk, rate, algo_id);
		spin_unlock_irqrestore(&clock_lock, flags);
	}

	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
		propagate_rate(clk);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);

void clk_recalc_rate(struct clk *clk)
{
	if (likely(clk->ops && clk->ops->recalc)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		clk->ops->recalc(clk);
		spin_unlock_irqrestore(&clock_lock, flags);
	}

	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
		propagate_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_recalc_rate);
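
/*
 * Consumer usage sketch (illustrative only; the platform device and the
 * "module_clk" lookup are assumptions, not taken from this file):
 *
 *	struct clk *clk = clk_get(&pdev->dev, "module_clk");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	clk_enable(clk);
 *	pr_info("clock rate: %lu Hz\n", clk_get_rate(clk));
 *	...
 *	clk_disable(clk);
 *	clk_put(clk);
 */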

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = -EINVAL;
	struct clk *old;

	if (!parent || !clk)
		return ret;

	old = clk->parent;
	if (likely(clk->ops && clk->ops->set_parent)) {
		unsigned long flags;
		spin_lock_irqsave(&clock_lock, flags);
		ret = clk->ops->set_parent(clk, parent);
		spin_unlock_irqrestore(&clock_lock, flags);
		clk->parent = (ret ? old : parent);
	}

	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
		propagate_rate(clk);
	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/*
 * Returns a clock. Note that we first try to use device id on the bus
 * and clock name. If this fails, we try to use clock name only.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
	struct clk *p, *clk = ERR_PTR(-ENOENT);
	int idno;

	if (dev == NULL || dev->bus != &platform_bus_type)
		idno = -1;
	else
		idno = to_platform_device(dev)->id;

	mutex_lock(&clock_list_sem);
	list_for_each_entry(p, &clock_list, node) {
		if (p->id == idno &&
		    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			goto found;
		}
	}

	list_for_each_entry(p, &clock_list, node) {
		if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			break;
		}
	}

found:
	mutex_unlock(&clock_list_sem);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_get);

void clk_put(struct clk *clk)
{
	if (clk && !IS_ERR(clk))
		module_put(clk->owner);
}
EXPORT_SYMBOL_GPL(clk_put);

void __init __attribute__ ((weak))
arch_init_clk_ops(struct clk_ops **ops, int type)
{
}

int __init __attribute__ ((weak))
arch_clk_init(void)
{
	return 0;
}
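
/*
 * Subtype sketch (hypothetical; the names below are illustrative and not
 * from this file): a processor family overrides the weak hooks above to
 * hand ops to the four CPG clocks that clk_init() registers, indexed in
 * onchip_clocks[] order:
 *
 *	static struct clk_ops sh_master_clk_ops = {
 *		.recalc	= sh_master_clk_recalc,
 *	};
 *
 *	void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
 *	{
 *		if (idx == 0)
 *			*ops = &sh_master_clk_ops;
 *	}
 */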

static int show_clocks(char *buf, char **start, off_t off,
		       int len, int *eof, void *data)
{
	struct clk *clk;
	char *p = buf;

	list_for_each_entry_reverse(clk, &clock_list, node) {
		unsigned long rate = clk_get_rate(clk);

		p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name,
			     rate / 1000000, (rate % 1000000) / 10000,
			     ((clk->flags & CLK_ALWAYS_ENABLED) ||
			      clk->usecount > 0) ?
			     "enabled" : "disabled");
	}

	return p - buf;
}

#ifdef CONFIG_PM
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event == PM_EVENT_FREEZE) {
			list_for_each_entry(clkp, &clock_list, node)
				if (likely(clkp->ops)) {
					unsigned long rate = clkp->rate;

					if (likely(clkp->ops->set_parent))
						clkp->ops->set_parent(clkp,
							clkp->parent);
					if (likely(clkp->ops->set_rate))
						clkp->ops->set_rate(clkp,
							rate, NO_CHANGE);
					else if (likely(clkp->ops->recalc))
						clkp->ops->recalc(clkp);
				}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
#endif

int __init clk_init(void)
{
	int i, ret = 0;

	BUG_ON(!master_clk.rate);

	for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
		struct clk *clk = onchip_clocks[i];

		arch_init_clk_ops(&clk->ops, i);
		ret |= clk_register(clk);
	}

	ret |= arch_clk_init();

	/* Kick the child clocks.. */
	propagate_rate(&master_clk);
	propagate_rate(&bus_clk);

	return ret;
}

static int __init clk_proc_init(void)
{
	struct proc_dir_entry *p;
	p = create_proc_read_entry("clocks", S_IRUSR, NULL,
				   show_clocks, NULL);
	if (unlikely(!p))
		return -EINVAL;

	return 0;
}
subsys_initcall(clk_proc_init);
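
/*
 * Sample /proc/clocks output under the show_clocks() format string above
 * (rates are illustrative; actual values depend on the part and on
 * CONFIG_SH_PCLK_FREQ):
 *
 *	master_clk  	: 33.33MHz	enabled
 *	module_clk  	: 33.33MHz	enabled
 *	bus_clk     	: 66.66MHz	enabled
 *	cpu_clk     	: 133.33MHz	enabled
 */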