/*
 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
 *
 * Copyright (C) 2005, 2006, 2007  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2005 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <asm/clock.h>
#include <asm/timer.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/*
 * Each subtype is expected to define the init routines for these clocks,
 * as each subtype (or processor family) will have these clocks at the
 * very least.  These are all provided through the CPG, which even some of
 * the more quirky parts (such as ST40, SH4-202, etc.) still have.
 *
 * The processor-specific code is expected to register any additional
 * clock sources that are of interest.
 */
static struct clk master_clk = {
	.name		= "master_clk",
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
	.rate		= CONFIG_SH_PCLK_FREQ,
};

static struct clk module_clk = {
	.name		= "module_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
};

static struct clk bus_clk = {
	.name		= "bus_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
};

static struct clk cpu_clk = {
	.name		= "cpu_clk",
	.parent		= &master_clk,
	.flags		= CLK_ALWAYS_ENABLED,
};

/*
 * The ordering of these clocks matters, do not change it.
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&module_clk,
	&bus_clk,
	&cpu_clk,
};

static void propagate_rate(struct clk *clk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node) {
		if (likely(clkp->parent != clk))
			continue;
		if (likely(clkp->ops && clkp->ops->recalc))
			clkp->ops->recalc(clkp);
		if (unlikely(clkp->flags & CLK_RATE_PROPAGATES))
			propagate_rate(clkp);
	}
}
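/*
 * Illustrative sketch (not part of this file): processor-specific code
 * would typically hook an additional clock source into the tree along
 * these lines, where "extra_clk" and my_clk_ops are hypothetical:
 *
 *	static struct clk extra_clk = {
 *		.name	= "extra_clk",
 *		.parent	= &bus_clk,
 *		.ops	= &my_clk_ops,
 *		.flags	= CLK_RATE_PROPAGATES,
 *	};
 *
 *	clk_register(&extra_clk);
 *
 * Any later rate change on bus_clk then reaches extra_clk (and its
 * children, if any) through propagate_rate() above.
 */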
static int __clk_enable(struct clk *clk)
{
	/*
	 * See if this is the first time we're enabling the clock, some
	 * clocks that are always enabled still require "special"
	 * initialization.  This is especially true if the clock mode
	 * changes and the clock needs to hunt for the proper set of
	 * divisors to use before it can effectively recalc.
	 */
	if (unlikely(atomic_read(&clk->kref.refcount) == 1))
		if (clk->ops && clk->ops->init)
			clk->ops->init(clk);

	kref_get(&clk->kref);

	if (clk->flags & CLK_ALWAYS_ENABLED)
		return 0;

	if (likely(clk->ops && clk->ops->enable))
		clk->ops->enable(clk);

	return 0;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	clk_enable(clk->parent);

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

static void clk_kref_release(struct kref *kref)
{
	/* Nothing to do */
}

static void __clk_disable(struct clk *clk)
{
	kref_put(&clk->kref, clk_kref_release);

	if (clk->flags & CLK_ALWAYS_ENABLED)
		return;

	/*
	 * kref_init() starts the refcount at 1, so it settles back at
	 * that baseline (never at zero) once the last user has gone;
	 * only then is the clock actually disabled.
	 */
	if (atomic_read(&clk->kref.refcount) == 1)
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	clk_disable(clk->parent);
}
EXPORT_SYMBOL_GPL(clk_disable);
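/*
 * Illustrative sketch of the refcounted enable/disable pairing from a
 * driver's point of view ("my_clk" is a hypothetical clock name; the
 * counts below are relative to the kref_init() baseline of 1):
 *
 *	struct clk *clk = clk_get(NULL, "my_clk");
 *
 *	clk_enable(clk);	first user: hardware is switched on
 *	clk_enable(clk);	second user: no hardware access
 *	clk_disable(clk);	one user left: clock stays enabled
 *	clk_disable(clk);	last user gone: hardware switched off
 */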
int clk_register(struct clk *clk)
{
	mutex_lock(&clock_list_sem);

	list_add(&clk->node, &clock_list);
	kref_init(&clk->kref);

	mutex_unlock(&clock_list_sem);

	if (clk->flags & CLK_ALWAYS_ENABLED) {
		pr_debug("Clock '%s' is ALWAYS_ENABLED\n", clk->name);
		if (clk->ops && clk->ops->init)
			clk->ops->init(clk);
		if (clk->ops && clk->ops->enable)
			clk->ops->enable(clk);
		pr_debug("Enabled.\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clk_register);

void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->node);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
	int ret = -EOPNOTSUPP;

	if (likely(clk->ops && clk->ops->set_rate)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		ret = clk->ops->set_rate(clk, rate, algo_id);
		spin_unlock_irqrestore(&clock_lock, flags);
	}

	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
		propagate_rate(clk);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);

void clk_recalc_rate(struct clk *clk)
{
	if (likely(clk->ops && clk->ops->recalc)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		clk->ops->recalc(clk);
		spin_unlock_irqrestore(&clock_lock, flags);
	}

	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
		propagate_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_recalc_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = -EINVAL;
	struct clk *old;

	if (!parent || !clk)
		return ret;

	old = clk->parent;
	if (likely(clk->ops && clk->ops->set_parent)) {
		unsigned long flags;

		spin_lock_irqsave(&clock_lock, flags);
		ret = clk->ops->set_parent(clk, parent);
		spin_unlock_irqrestore(&clock_lock, flags);
		clk->parent = (ret ? old : parent);
	}

	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
		propagate_rate(clk);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/*
 * Returns a clock.  Note that we first try to use device id on the bus
 * and clock name.  If this fails, we try to use clock name only.
 */
struct clk *clk_get(struct device *dev, const char *id)
{
	struct clk *p, *clk = ERR_PTR(-ENOENT);
	int idno;

	if (dev == NULL || dev->bus != &platform_bus_type)
		idno = -1;
	else
		idno = to_platform_device(dev)->id;

	mutex_lock(&clock_list_sem);
	list_for_each_entry(p, &clock_list, node) {
		if (p->id == idno &&
		    strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			goto found;
		}
	}

	list_for_each_entry(p, &clock_list, node) {
		if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
			clk = p;
			break;
		}
	}

found:
	mutex_unlock(&clock_list_sem);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_get);

void clk_put(struct clk *clk)
{
	if (clk && !IS_ERR(clk))
		module_put(clk->owner);
}
EXPORT_SYMBOL_GPL(clk_put);
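/*
 * Illustrative lookup (sketch): a platform driver would pair clk_get()
 * with clk_put() along these lines; error handling is minimal, pdev is
 * the driver's platform device, and "module_clk" is simply one of the
 * clocks registered above:
 *
 *	struct clk *clk = clk_get(&pdev->dev, "module_clk");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	clk_enable(clk);
 *	pr_info("module clock runs at %lu Hz\n", clk_get_rate(clk));
 *	clk_disable(clk);
 *	clk_put(clk);
 */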
357 "enabled" : "disabled"); 358 } 359 360 return p - buf; 361 } 362 363 #ifdef CONFIG_PM 364 static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state) 365 { 366 static pm_message_t prev_state; 367 struct clk *clkp; 368 369 switch (state.event) { 370 case PM_EVENT_ON: 371 /* Resumeing from hibernation */ 372 if (prev_state.event == PM_EVENT_FREEZE) { 373 list_for_each_entry(clkp, &clock_list, node) 374 if (likely(clkp->ops)) { 375 unsigned long rate = clkp->rate; 376 377 if (likely(clkp->ops->set_parent)) 378 clkp->ops->set_parent(clkp, 379 clkp->parent); 380 if (likely(clkp->ops->set_rate)) 381 clkp->ops->set_rate(clkp, 382 rate, NO_CHANGE); 383 else if (likely(clkp->ops->recalc)) 384 clkp->ops->recalc(clkp); 385 } 386 } 387 break; 388 case PM_EVENT_FREEZE: 389 break; 390 case PM_EVENT_SUSPEND: 391 break; 392 } 393 394 prev_state = state; 395 return 0; 396 } 397 398 static int clks_sysdev_resume(struct sys_device *dev) 399 { 400 return clks_sysdev_suspend(dev, PMSG_ON); 401 } 402 403 static struct sysdev_class clks_sysdev_class = { 404 .name = "clks", 405 }; 406 407 static struct sysdev_driver clks_sysdev_driver = { 408 .suspend = clks_sysdev_suspend, 409 .resume = clks_sysdev_resume, 410 }; 411 412 static struct sys_device clks_sysdev_dev = { 413 .cls = &clks_sysdev_class, 414 }; 415 416 static int __init clk_sysdev_init(void) 417 { 418 sysdev_class_register(&clks_sysdev_class); 419 sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver); 420 sysdev_register(&clks_sysdev_dev); 421 422 return 0; 423 } 424 subsys_initcall(clk_sysdev_init); 425 #endif 426 427 int __init clk_init(void) 428 { 429 int i, ret = 0; 430 431 BUG_ON(!master_clk.rate); 432 433 for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) { 434 struct clk *clk = onchip_clocks[i]; 435 436 arch_init_clk_ops(&clk->ops, i); 437 ret |= clk_register(clk); 438 } 439 440 ret |= arch_clk_init(); 441 442 /* Kick the child clocks.. */ 443 propagate_rate(&master_clk); 444 propagate_rate(&bus_clk); 445 446 return ret; 447 } 448 449 static int __init clk_proc_init(void) 450 { 451 struct proc_dir_entry *p; 452 p = create_proc_read_entry("clocks", S_IRUSR, NULL, 453 show_clocks, NULL); 454 if (unlikely(!p)) 455 return -EINVAL; 456 457 return 0; 458 } 459 subsys_initcall(clk_proc_init); 460