/*
 * SuperH clock framework
 *
 * Copyright (C) 2005 - 2010 Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "clock: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

void clk_rate_table_build(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  int nr_freqs,
			  struct clk_div_mult_table *src_table,
			  unsigned long *bitmap)
{
	unsigned long mult, div;
	unsigned long freq;
	int i;

	clk->nr_freqs = nr_freqs;

	for (i = 0; i < nr_freqs; i++) {
		div = 1;
		mult = 1;

		if (src_table->divisors && i < src_table->nr_divisors)
			div = src_table->divisors[i];

		if (src_table->multipliers && i < src_table->nr_multipliers)
			mult = src_table->multipliers[i];

		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
			freq = CPUFREQ_ENTRY_INVALID;
		else
			freq = clk->parent->rate * mult / div;

		freq_table[i].index = i;
		freq_table[i].frequency = freq;
	}

	/* Termination entry */
	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;
}

struct clk_rate_round_data;

struct clk_rate_round_data {
	unsigned long rate;
	unsigned int min, max;
	long (*func)(unsigned int, struct clk_rate_round_data *);
	void *arg;
};

#define for_each_frequency(pos, r, freq)			\
	for (pos = r->min, freq = r->func(pos, r);		\
	     pos <= r->max; pos++, freq = r->func(pos, r))	\
		if (unlikely(freq == 0))			\
			;					\
		else

static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
{
	unsigned long rate_error, rate_error_prev = ~0UL;
	unsigned long rate_best_fit = rounder->rate;
	unsigned long highest, lowest, freq;
	int i;

	highest = 0;
	lowest = ~0UL;

	for_each_frequency(i, rounder, freq) {
		if (freq > highest)
			highest = freq;
		if (freq < lowest)
			lowest = freq;

		rate_error = abs(freq - rounder->rate);
		if (rate_error < rate_error_prev) {
			rate_best_fit = freq;
			rate_error_prev = rate_error;
		}

		if (rate_error == 0)
			break;
	}

	if (rounder->rate >= highest)
		rate_best_fit = highest;
	if (rounder->rate <= lowest)
		rate_best_fit = lowest;

	return rate_best_fit;
}

static long clk_rate_table_iter(unsigned int pos,
				struct clk_rate_round_data *rounder)
{
	struct cpufreq_frequency_table *freq_table = rounder->arg;
	unsigned long freq = freq_table[pos].frequency;

	if (freq == CPUFREQ_ENTRY_INVALID)
		freq = 0;

	return freq;
}
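/*
 * Usage sketch (hypothetical, for illustration only; nothing below is
 * part of this framework): a CPU DVFS driver would typically build its
 * cpufreq table from a divisor table with clk_rate_table_build(), then
 * round requested rates against it with clk_rate_table_round(), declared
 * in <linux/sh_clk.h>. All "example_*" names are assumptions, and
 * example_clk is assumed to have a parent with a valid rate.
 */
static unsigned int example_divisors[] = { 1, 2, 3, 4, 6, 8 };

static struct clk_div_mult_table example_div_mult = {
	.divisors	= example_divisors,
	.nr_divisors	= ARRAY_SIZE(example_divisors),
};

/* One extra slot for the CPUFREQ_TABLE_END terminator */
static struct cpufreq_frequency_table
	example_freq_table[ARRAY_SIZE(example_divisors) + 1];

static long __maybe_unused example_round(struct clk *example_clk,
					 unsigned long rate)
{
	/* Fills in parent->rate / div for each entry, plus the terminator */
	clk_rate_table_build(example_clk, example_freq_table,
			     ARRAY_SIZE(example_divisors),
			     &example_div_mult, NULL);

	/* Best-fit match, computed via clk_rate_round_helper() */
	return clk_rate_table_round(example_clk, example_freq_table, rate);
}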
long clk_rate_table_round(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  unsigned long rate)
{
	struct clk_rate_round_data table_round = {
		.min	= 0,
		.max	= clk->nr_freqs - 1,
		.func	= clk_rate_table_iter,
		.arg	= freq_table,
		.rate	= rate,
	};

	if (clk->nr_freqs < 1)
		return 0;

	return clk_rate_round_helper(&table_round);
}

static long clk_rate_div_range_iter(unsigned int pos,
				    struct clk_rate_round_data *rounder)
{
	return clk_get_rate(rounder->arg) / pos;
}

long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
			      unsigned int div_max, unsigned long rate)
{
	struct clk_rate_round_data div_range_round = {
		.min	= div_min,
		.max	= div_max,
		.func	= clk_rate_div_range_iter,
		.arg	= clk_get_parent(clk),
		.rate	= rate,
	};

	return clk_rate_round_helper(&div_range_round);
}

int clk_rate_table_find(struct clk *clk,
			struct cpufreq_frequency_table *freq_table,
			unsigned long rate)
{
	int i;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned long freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		if (freq == rate)
			return i;
	}

	return -ENOENT;
}

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent ? clk->parent->rate : 0;
}

int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/* now do the debugfs renaming to reattach the child
	   to the proper parent */

	return 0;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);

		propagate_rate(clkp);
	}
}

static void __clk_disable(struct clk *clk)
{
	if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
		 clk))
		return;

	if (!(--clk->usecount)) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
		if (likely(clk->parent))
			__clk_disable(clk->parent);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
	}

	return ret;
err:
	clk->usecount--;
	return ret;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
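/*
 * Refcounting sketch (hypothetical, for illustration only): enables and
 * disables are strictly counted, and enabling a clock implicitly enables
 * its parent chain. The hardware is only touched on the 0 <-> 1
 * transitions, so every clk_enable() must be balanced by exactly one
 * clk_disable(). "example_consumer" is an assumed driver-side helper.
 */
static int __maybe_unused example_consumer(struct clk *clk)
{
	int ret;

	ret = clk_enable(clk);		/* usecount 0 -> 1, parents enabled */
	if (ret)
		return ret;

	ret = clk_enable(clk);		/* usecount 1 -> 2, no hardware access */
	if (ret) {
		clk_disable(clk);
		return ret;
	}

	clk_disable(clk);		/* usecount 2 -> 1 */
	clk_disable(clk);		/* usecount 1 -> 0, ops->disable runs */

	return 0;
}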
static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which, if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}

static struct clk_mapping dummy_mapping;

static struct clk *lookup_root_clock(struct clk *clk)
{
	while (clk->parent)
		clk = clk->parent;

	return clk;
}

static int clk_establish_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/*
	 * Propagate mappings.
	 */
	if (!mapping) {
		struct clk *clkp;

		/*
		 * dummy mapping for root clocks with no specified ranges
		 */
		if (!clk->parent) {
			clk->mapping = &dummy_mapping;
			return 0;
		}

		/*
		 * If we're on a child clock and it provides no mapping of its
		 * own, inherit the mapping from its root clock.
		 */
		clkp = lookup_root_clock(clk);
		mapping = clkp->mapping;
		BUG_ON(!mapping);
	}

	/*
	 * Establish initial mapping.
	 */
	if (!mapping->base && mapping->phys) {
		kref_init(&mapping->ref);

		mapping->base = ioremap_nocache(mapping->phys, mapping->len);
		if (unlikely(!mapping->base))
			return -ENXIO;
	} else if (mapping->base) {
		/*
		 * Bump the refcount for an existing mapping
		 */
		kref_get(&mapping->ref);
	}

	clk->mapping = mapping;
	return 0;
}

static void clk_destroy_mapping(struct kref *kref)
{
	struct clk_mapping *mapping;

	mapping = container_of(kref, struct clk_mapping, ref);

	iounmap(mapping->base);
}

static void clk_teardown_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/* Nothing to do */
	if (mapping == &dummy_mapping)
		return;

	kref_put(&mapping->ref, clk_destroy_mapping);
	clk->mapping = NULL;
}

int clk_register(struct clk *clk)
{
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clock_list_sem);

	INIT_LIST_HEAD(&clk->children);
	clk->usecount = 0;

	ret = clk_establish_mapping(clk);
	if (unlikely(ret))
		goto out_unlock;

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clock_list);
	if (clk->ops && clk->ops->init)
		clk->ops->init(clk);

out_unlock:
	mutex_unlock(&clock_list_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_register);

void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);
	clk_teardown_mapping(clk);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node)
		if (clkp->flags & CLK_ENABLE_ON_INIT)
			clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return clk_set_rate_ex(clk, rate, 0);
}
EXPORT_SYMBOL_GPL(clk_set_rate);
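/*
 * Registration sketch (hypothetical, for illustration only): platform
 * code would normally describe its clocks in SoC-specific tables. A
 * minimal fixed-rate root clock needs only a .recalc op (assuming the
 * struct clk_ops layout from <linux/sh_clk.h>); registering it puts it
 * on root_clks and assigns the dummy mapping, since it has no parent
 * and no physical range. All "example_*" names are assumptions.
 */
static unsigned long example_fixed_recalc(struct clk *clk)
{
	return 33333333;	/* e.g. a fixed 33.33 MHz oscillator */
}

static struct clk_ops example_fixed_ops = {
	.recalc	= example_fixed_recalc,
};

static struct clk example_root_clk = {
	.ops	= &example_fixed_ops,
};

static int __maybe_unused example_register(void)
{
	int ret;

	ret = clk_register(&example_root_clk);
	if (ret == 0)
		recalculate_root_clocks();	/* pick up the fixed rate */

	return ret;
}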
int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
{
	int ret = -EOPNOTSUPP;
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);

	if (likely(clk->ops && clk->ops->set_rate)) {
		ret = clk->ops->set_rate(clk, rate, algo_id);
		if (ret != 0)
			goto out_unlock;
	} else {
		clk->rate = rate;
		ret = 0;
	}

	if (clk->ops && clk->ops->recalc)
		clk->rate = clk->ops->recalc(clk);

	propagate_rate(clk);

out_unlock:
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_ex);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (!parent || !clk)
		return ret;
	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clock_lock, flags);
	if (clk->usecount == 0) {
		if (clk->ops->set_parent)
			ret = clk->ops->set_parent(clk, parent);
		else
			ret = clk_reparent(clk, parent);

		if (ret == 0) {
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			pr_debug("set parent of %p to %p (new rate %ld)\n",
				 clk, clk->parent, clk->rate);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);

#ifdef CONFIG_PM
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp,
						rate, NO_CHANGE);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
#endif
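/*
 * Round-then-set sketch (hypothetical, for illustration only): callers
 * that cannot tolerate an inexact rate typically ask clk_round_rate()
 * what the framework can actually deliver and only then commit with
 * clk_set_rate(). "example_set_nearest" is an assumed helper name.
 */
static int __maybe_unused example_set_nearest(struct clk *clk,
					      unsigned long target)
{
	long rounded = clk_round_rate(clk, target);

	if (rounded <= 0)
		return -EINVAL;

	/* Commit the achievable rate; child rates are repropagated */
	return clk_set_rate(clk, rounded);
}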
/*
 * debugfs support to trace clock tree hierarchy and attributes
 */
static struct dentry *clk_debugfs_root;

static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d, *child, *child_tmp;
	struct clk *pa = c->parent;
	char s[255];
	char *p = s;

	p += sprintf(p, "%p", c);
	d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dentry = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	d = c->dentry;
	list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
		debugfs_remove(child);
	debugfs_remove(c->dentry);
	return err;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dentry) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dentry) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clock_list, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);
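/*
 * Resulting debugfs layout (illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug): each clock gets a directory named after its
 * struct clk pointer, nested under its parent's directory:
 *
 *	/sys/kernel/debug/clock/<root clk>/usecount
 *	/sys/kernel/debug/clock/<root clk>/rate
 *	/sys/kernel/debug/clock/<root clk>/flags
 *	/sys/kernel/debug/clock/<root clk>/<child clk>/...
 */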