1 /* 2 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com> 3 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License version 2 as 7 * published by the Free Software Foundation. 8 * 9 * Standard functionality for the common clock API. See Documentation/clk.txt 10 */ 11 12 #include <linux/clk-private.h> 13 #include <linux/clk/clk-conf.h> 14 #include <linux/module.h> 15 #include <linux/mutex.h> 16 #include <linux/spinlock.h> 17 #include <linux/err.h> 18 #include <linux/list.h> 19 #include <linux/slab.h> 20 #include <linux/of.h> 21 #include <linux/device.h> 22 #include <linux/init.h> 23 #include <linux/sched.h> 24 25 #include "clk.h" 26 27 static DEFINE_SPINLOCK(enable_lock); 28 static DEFINE_MUTEX(prepare_lock); 29 30 static struct task_struct *prepare_owner; 31 static struct task_struct *enable_owner; 32 33 static int prepare_refcnt; 34 static int enable_refcnt; 35 36 static HLIST_HEAD(clk_root_list); 37 static HLIST_HEAD(clk_orphan_list); 38 static LIST_HEAD(clk_notifier_list); 39 40 /*** locking ***/ 41 static void clk_prepare_lock(void) 42 { 43 if (!mutex_trylock(&prepare_lock)) { 44 if (prepare_owner == current) { 45 prepare_refcnt++; 46 return; 47 } 48 mutex_lock(&prepare_lock); 49 } 50 WARN_ON_ONCE(prepare_owner != NULL); 51 WARN_ON_ONCE(prepare_refcnt != 0); 52 prepare_owner = current; 53 prepare_refcnt = 1; 54 } 55 56 static void clk_prepare_unlock(void) 57 { 58 WARN_ON_ONCE(prepare_owner != current); 59 WARN_ON_ONCE(prepare_refcnt == 0); 60 61 if (--prepare_refcnt) 62 return; 63 prepare_owner = NULL; 64 mutex_unlock(&prepare_lock); 65 } 66 67 static unsigned long clk_enable_lock(void) 68 { 69 unsigned long flags; 70 71 if (!spin_trylock_irqsave(&enable_lock, flags)) { 72 if (enable_owner == current) { 73 enable_refcnt++; 74 return flags; 75 } 76 spin_lock_irqsave(&enable_lock, flags); 77 } 78 WARN_ON_ONCE(enable_owner != NULL); 79 WARN_ON_ONCE(enable_refcnt != 0); 80 enable_owner = current; 81 enable_refcnt = 1; 82 return flags; 83 } 84 85 static void clk_enable_unlock(unsigned long flags) 86 { 87 WARN_ON_ONCE(enable_owner != current); 88 WARN_ON_ONCE(enable_refcnt == 0); 89 90 if (--enable_refcnt) 91 return; 92 enable_owner = NULL; 93 spin_unlock_irqrestore(&enable_lock, flags); 94 } 95 96 /*** debugfs support ***/ 97 98 #ifdef CONFIG_DEBUG_FS 99 #include <linux/debugfs.h> 100 101 static struct dentry *rootdir; 102 static int inited = 0; 103 static DEFINE_MUTEX(clk_debug_lock); 104 static HLIST_HEAD(clk_debug_list); 105 106 static struct hlist_head *all_lists[] = { 107 &clk_root_list, 108 &clk_orphan_list, 109 NULL, 110 }; 111 112 static struct hlist_head *orphan_list[] = { 113 &clk_orphan_list, 114 NULL, 115 }; 116 117 static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level) 118 { 119 if (!c) 120 return; 121 122 seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n", 123 level * 3 + 1, "", 124 30 - level * 3, c->name, 125 c->enable_count, c->prepare_count, clk_get_rate(c), 126 clk_get_accuracy(c), clk_get_phase(c)); 127 } 128 129 static void clk_summary_show_subtree(struct seq_file *s, struct clk *c, 130 int level) 131 { 132 struct clk *child; 133 134 if (!c) 135 return; 136 137 clk_summary_show_one(s, c, level); 138 139 hlist_for_each_entry(child, &c->children, child_node) 140 clk_summary_show_subtree(s, child, level + 1); 141 } 142 143 static int clk_summary_show(struct seq_file *s, void *data) 144 { 145 
struct clk *c; 146 struct hlist_head **lists = (struct hlist_head **)s->private; 147 148 seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n"); 149 seq_puts(s, "----------------------------------------------------------------------------------------\n"); 150 151 clk_prepare_lock(); 152 153 for (; *lists; lists++) 154 hlist_for_each_entry(c, *lists, child_node) 155 clk_summary_show_subtree(s, c, 0); 156 157 clk_prepare_unlock(); 158 159 return 0; 160 } 161 162 163 static int clk_summary_open(struct inode *inode, struct file *file) 164 { 165 return single_open(file, clk_summary_show, inode->i_private); 166 } 167 168 static const struct file_operations clk_summary_fops = { 169 .open = clk_summary_open, 170 .read = seq_read, 171 .llseek = seq_lseek, 172 .release = single_release, 173 }; 174 175 static void clk_dump_one(struct seq_file *s, struct clk *c, int level) 176 { 177 if (!c) 178 return; 179 180 seq_printf(s, "\"%s\": { ", c->name); 181 seq_printf(s, "\"enable_count\": %d,", c->enable_count); 182 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); 183 seq_printf(s, "\"rate\": %lu", clk_get_rate(c)); 184 seq_printf(s, "\"accuracy\": %lu", clk_get_accuracy(c)); 185 seq_printf(s, "\"phase\": %d", clk_get_phase(c)); 186 } 187 188 static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level) 189 { 190 struct clk *child; 191 192 if (!c) 193 return; 194 195 clk_dump_one(s, c, level); 196 197 hlist_for_each_entry(child, &c->children, child_node) { 198 seq_printf(s, ","); 199 clk_dump_subtree(s, child, level + 1); 200 } 201 202 seq_printf(s, "}"); 203 } 204 205 static int clk_dump(struct seq_file *s, void *data) 206 { 207 struct clk *c; 208 bool first_node = true; 209 struct hlist_head **lists = (struct hlist_head **)s->private; 210 211 seq_printf(s, "{"); 212 213 clk_prepare_lock(); 214 215 for (; *lists; lists++) { 216 hlist_for_each_entry(c, *lists, child_node) { 217 if (!first_node) 218 seq_puts(s, ","); 219 first_node = false; 220 clk_dump_subtree(s, c, 0); 221 } 222 } 223 224 clk_prepare_unlock(); 225 226 seq_printf(s, "}"); 227 return 0; 228 } 229 230 231 static int clk_dump_open(struct inode *inode, struct file *file) 232 { 233 return single_open(file, clk_dump, inode->i_private); 234 } 235 236 static const struct file_operations clk_dump_fops = { 237 .open = clk_dump_open, 238 .read = seq_read, 239 .llseek = seq_lseek, 240 .release = single_release, 241 }; 242 243 static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry) 244 { 245 struct dentry *d; 246 int ret = -ENOMEM; 247 248 if (!clk || !pdentry) { 249 ret = -EINVAL; 250 goto out; 251 } 252 253 d = debugfs_create_dir(clk->name, pdentry); 254 if (!d) 255 goto out; 256 257 clk->dentry = d; 258 259 d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry, 260 (u32 *)&clk->rate); 261 if (!d) 262 goto err_out; 263 264 d = debugfs_create_u32("clk_accuracy", S_IRUGO, clk->dentry, 265 (u32 *)&clk->accuracy); 266 if (!d) 267 goto err_out; 268 269 d = debugfs_create_u32("clk_phase", S_IRUGO, clk->dentry, 270 (u32 *)&clk->phase); 271 if (!d) 272 goto err_out; 273 274 d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry, 275 (u32 *)&clk->flags); 276 if (!d) 277 goto err_out; 278 279 d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry, 280 (u32 *)&clk->prepare_count); 281 if (!d) 282 goto err_out; 283 284 d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry, 285 (u32 *)&clk->enable_count); 286 if (!d) 287 goto err_out; 288 289 d = debugfs_create_u32("clk_notifier_count", 
S_IRUGO, clk->dentry, 290 (u32 *)&clk->notifier_count); 291 if (!d) 292 goto err_out; 293 294 if (clk->ops->debug_init) { 295 ret = clk->ops->debug_init(clk->hw, clk->dentry); 296 if (ret) 297 goto err_out; 298 } 299 300 ret = 0; 301 goto out; 302 303 err_out: 304 debugfs_remove_recursive(clk->dentry); 305 clk->dentry = NULL; 306 out: 307 return ret; 308 } 309 310 /** 311 * clk_debug_register - add a clk node to the debugfs clk tree 312 * @clk: the clk being added to the debugfs clk tree 313 * 314 * Dynamically adds a clk to the debugfs clk tree if debugfs has been 315 * initialized. Otherwise it bails out early since the debugfs clk tree 316 * will be created lazily by clk_debug_init as part of a late_initcall. 317 */ 318 static int clk_debug_register(struct clk *clk) 319 { 320 int ret = 0; 321 322 mutex_lock(&clk_debug_lock); 323 hlist_add_head(&clk->debug_node, &clk_debug_list); 324 325 if (!inited) 326 goto unlock; 327 328 ret = clk_debug_create_one(clk, rootdir); 329 unlock: 330 mutex_unlock(&clk_debug_lock); 331 332 return ret; 333 } 334 335 /** 336 * clk_debug_unregister - remove a clk node from the debugfs clk tree 337 * @clk: the clk being removed from the debugfs clk tree 338 * 339 * Dynamically removes a clk and all it's children clk nodes from the 340 * debugfs clk tree if clk->dentry points to debugfs created by 341 * clk_debug_register in __clk_init. 342 */ 343 static void clk_debug_unregister(struct clk *clk) 344 { 345 mutex_lock(&clk_debug_lock); 346 if (!clk->dentry) 347 goto out; 348 349 hlist_del_init(&clk->debug_node); 350 debugfs_remove_recursive(clk->dentry); 351 clk->dentry = NULL; 352 out: 353 mutex_unlock(&clk_debug_lock); 354 } 355 356 struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode, 357 void *data, const struct file_operations *fops) 358 { 359 struct dentry *d = NULL; 360 361 if (hw->clk->dentry) 362 d = debugfs_create_file(name, mode, hw->clk->dentry, data, fops); 363 364 return d; 365 } 366 EXPORT_SYMBOL_GPL(clk_debugfs_add_file); 367 368 /** 369 * clk_debug_init - lazily create the debugfs clk tree visualization 370 * 371 * clks are often initialized very early during boot before memory can 372 * be dynamically allocated and well before debugfs is setup. 373 * clk_debug_init walks the clk tree hierarchy while holding 374 * prepare_lock and creates the topology as part of a late_initcall, 375 * thus insuring that clks initialized very early will still be 376 * represented in the debugfs clk tree. This function should only be 377 * called once at boot-time, and all other clks added dynamically will 378 * be done so with clk_debug_register. 
379 */ 380 static int __init clk_debug_init(void) 381 { 382 struct clk *clk; 383 struct dentry *d; 384 385 rootdir = debugfs_create_dir("clk", NULL); 386 387 if (!rootdir) 388 return -ENOMEM; 389 390 d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists, 391 &clk_summary_fops); 392 if (!d) 393 return -ENOMEM; 394 395 d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists, 396 &clk_dump_fops); 397 if (!d) 398 return -ENOMEM; 399 400 d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir, 401 &orphan_list, &clk_summary_fops); 402 if (!d) 403 return -ENOMEM; 404 405 d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir, 406 &orphan_list, &clk_dump_fops); 407 if (!d) 408 return -ENOMEM; 409 410 mutex_lock(&clk_debug_lock); 411 hlist_for_each_entry(clk, &clk_debug_list, debug_node) 412 clk_debug_create_one(clk, rootdir); 413 414 inited = 1; 415 mutex_unlock(&clk_debug_lock); 416 417 return 0; 418 } 419 late_initcall(clk_debug_init); 420 #else 421 static inline int clk_debug_register(struct clk *clk) { return 0; } 422 static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent) 423 { 424 } 425 static inline void clk_debug_unregister(struct clk *clk) 426 { 427 } 428 #endif 429 430 /* caller must hold prepare_lock */ 431 static void clk_unprepare_unused_subtree(struct clk *clk) 432 { 433 struct clk *child; 434 435 if (!clk) 436 return; 437 438 hlist_for_each_entry(child, &clk->children, child_node) 439 clk_unprepare_unused_subtree(child); 440 441 if (clk->prepare_count) 442 return; 443 444 if (clk->flags & CLK_IGNORE_UNUSED) 445 return; 446 447 if (__clk_is_prepared(clk)) { 448 if (clk->ops->unprepare_unused) 449 clk->ops->unprepare_unused(clk->hw); 450 else if (clk->ops->unprepare) 451 clk->ops->unprepare(clk->hw); 452 } 453 } 454 455 /* caller must hold prepare_lock */ 456 static void clk_disable_unused_subtree(struct clk *clk) 457 { 458 struct clk *child; 459 unsigned long flags; 460 461 if (!clk) 462 goto out; 463 464 hlist_for_each_entry(child, &clk->children, child_node) 465 clk_disable_unused_subtree(child); 466 467 flags = clk_enable_lock(); 468 469 if (clk->enable_count) 470 goto unlock_out; 471 472 if (clk->flags & CLK_IGNORE_UNUSED) 473 goto unlock_out; 474 475 /* 476 * some gate clocks have special needs during the disable-unused 477 * sequence. 
call .disable_unused if available, otherwise fall 478 * back to .disable 479 */ 480 if (__clk_is_enabled(clk)) { 481 if (clk->ops->disable_unused) 482 clk->ops->disable_unused(clk->hw); 483 else if (clk->ops->disable) 484 clk->ops->disable(clk->hw); 485 } 486 487 unlock_out: 488 clk_enable_unlock(flags); 489 490 out: 491 return; 492 } 493 494 static bool clk_ignore_unused; 495 static int __init clk_ignore_unused_setup(char *__unused) 496 { 497 clk_ignore_unused = true; 498 return 1; 499 } 500 __setup("clk_ignore_unused", clk_ignore_unused_setup); 501 502 static int clk_disable_unused(void) 503 { 504 struct clk *clk; 505 506 if (clk_ignore_unused) { 507 pr_warn("clk: Not disabling unused clocks\n"); 508 return 0; 509 } 510 511 clk_prepare_lock(); 512 513 hlist_for_each_entry(clk, &clk_root_list, child_node) 514 clk_disable_unused_subtree(clk); 515 516 hlist_for_each_entry(clk, &clk_orphan_list, child_node) 517 clk_disable_unused_subtree(clk); 518 519 hlist_for_each_entry(clk, &clk_root_list, child_node) 520 clk_unprepare_unused_subtree(clk); 521 522 hlist_for_each_entry(clk, &clk_orphan_list, child_node) 523 clk_unprepare_unused_subtree(clk); 524 525 clk_prepare_unlock(); 526 527 return 0; 528 } 529 late_initcall_sync(clk_disable_unused); 530 531 /*** helper functions ***/ 532 533 const char *__clk_get_name(struct clk *clk) 534 { 535 return !clk ? NULL : clk->name; 536 } 537 EXPORT_SYMBOL_GPL(__clk_get_name); 538 539 struct clk_hw *__clk_get_hw(struct clk *clk) 540 { 541 return !clk ? NULL : clk->hw; 542 } 543 EXPORT_SYMBOL_GPL(__clk_get_hw); 544 545 u8 __clk_get_num_parents(struct clk *clk) 546 { 547 return !clk ? 0 : clk->num_parents; 548 } 549 EXPORT_SYMBOL_GPL(__clk_get_num_parents); 550 551 struct clk *__clk_get_parent(struct clk *clk) 552 { 553 return !clk ? NULL : clk->parent; 554 } 555 EXPORT_SYMBOL_GPL(__clk_get_parent); 556 557 struct clk *clk_get_parent_by_index(struct clk *clk, u8 index) 558 { 559 if (!clk || index >= clk->num_parents) 560 return NULL; 561 else if (!clk->parents) 562 return __clk_lookup(clk->parent_names[index]); 563 else if (!clk->parents[index]) 564 return clk->parents[index] = 565 __clk_lookup(clk->parent_names[index]); 566 else 567 return clk->parents[index]; 568 } 569 EXPORT_SYMBOL_GPL(clk_get_parent_by_index); 570 571 unsigned int __clk_get_enable_count(struct clk *clk) 572 { 573 return !clk ? 0 : clk->enable_count; 574 } 575 576 unsigned long __clk_get_rate(struct clk *clk) 577 { 578 unsigned long ret; 579 580 if (!clk) { 581 ret = 0; 582 goto out; 583 } 584 585 ret = clk->rate; 586 587 if (clk->flags & CLK_IS_ROOT) 588 goto out; 589 590 if (!clk->parent) 591 ret = 0; 592 593 out: 594 return ret; 595 } 596 EXPORT_SYMBOL_GPL(__clk_get_rate); 597 598 static unsigned long __clk_get_accuracy(struct clk *clk) 599 { 600 if (!clk) 601 return 0; 602 603 return clk->accuracy; 604 } 605 606 unsigned long __clk_get_flags(struct clk *clk) 607 { 608 return !clk ? 0 : clk->flags; 609 } 610 EXPORT_SYMBOL_GPL(__clk_get_flags); 611 612 bool __clk_is_prepared(struct clk *clk) 613 { 614 int ret; 615 616 if (!clk) 617 return false; 618 619 /* 620 * .is_prepared is optional for clocks that can prepare 621 * fall back to software usage counter if it is missing 622 */ 623 if (!clk->ops->is_prepared) { 624 ret = clk->prepare_count ? 
1 : 0; 625 goto out; 626 } 627 628 ret = clk->ops->is_prepared(clk->hw); 629 out: 630 return !!ret; 631 } 632 633 bool __clk_is_enabled(struct clk *clk) 634 { 635 int ret; 636 637 if (!clk) 638 return false; 639 640 /* 641 * .is_enabled is only mandatory for clocks that gate 642 * fall back to software usage counter if .is_enabled is missing 643 */ 644 if (!clk->ops->is_enabled) { 645 ret = clk->enable_count ? 1 : 0; 646 goto out; 647 } 648 649 ret = clk->ops->is_enabled(clk->hw); 650 out: 651 return !!ret; 652 } 653 EXPORT_SYMBOL_GPL(__clk_is_enabled); 654 655 static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk) 656 { 657 struct clk *child; 658 struct clk *ret; 659 660 if (!strcmp(clk->name, name)) 661 return clk; 662 663 hlist_for_each_entry(child, &clk->children, child_node) { 664 ret = __clk_lookup_subtree(name, child); 665 if (ret) 666 return ret; 667 } 668 669 return NULL; 670 } 671 672 struct clk *__clk_lookup(const char *name) 673 { 674 struct clk *root_clk; 675 struct clk *ret; 676 677 if (!name) 678 return NULL; 679 680 /* search the 'proper' clk tree first */ 681 hlist_for_each_entry(root_clk, &clk_root_list, child_node) { 682 ret = __clk_lookup_subtree(name, root_clk); 683 if (ret) 684 return ret; 685 } 686 687 /* if not found, then search the orphan tree */ 688 hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) { 689 ret = __clk_lookup_subtree(name, root_clk); 690 if (ret) 691 return ret; 692 } 693 694 return NULL; 695 } 696 697 /* 698 * Helper for finding best parent to provide a given frequency. This can be used 699 * directly as a determine_rate callback (e.g. for a mux), or from a more 700 * complex clock that may combine a mux with other operations. 701 */ 702 long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, 703 unsigned long *best_parent_rate, 704 struct clk_hw **best_parent_p) 705 { 706 struct clk *clk = hw->clk, *parent, *best_parent = NULL; 707 int i, num_parents; 708 unsigned long parent_rate, best = 0; 709 710 /* if NO_REPARENT flag set, pass through to current parent */ 711 if (clk->flags & CLK_SET_RATE_NO_REPARENT) { 712 parent = clk->parent; 713 if (clk->flags & CLK_SET_RATE_PARENT) 714 best = __clk_round_rate(parent, rate); 715 else if (parent) 716 best = __clk_get_rate(parent); 717 else 718 best = __clk_get_rate(clk); 719 goto out; 720 } 721 722 /* find the parent that can provide the fastest rate <= rate */ 723 num_parents = clk->num_parents; 724 for (i = 0; i < num_parents; i++) { 725 parent = clk_get_parent_by_index(clk, i); 726 if (!parent) 727 continue; 728 if (clk->flags & CLK_SET_RATE_PARENT) 729 parent_rate = __clk_round_rate(parent, rate); 730 else 731 parent_rate = __clk_get_rate(parent); 732 if (parent_rate <= rate && parent_rate > best) { 733 best_parent = parent; 734 best = parent_rate; 735 } 736 } 737 738 out: 739 if (best_parent) 740 *best_parent_p = best_parent->hw; 741 *best_parent_rate = best; 742 743 return best; 744 } 745 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate); 746 747 /*** clk api ***/ 748 749 void __clk_unprepare(struct clk *clk) 750 { 751 if (!clk) 752 return; 753 754 if (WARN_ON(clk->prepare_count == 0)) 755 return; 756 757 if (--clk->prepare_count > 0) 758 return; 759 760 WARN_ON(clk->enable_count > 0); 761 762 if (clk->ops->unprepare) 763 clk->ops->unprepare(clk->hw); 764 765 __clk_unprepare(clk->parent); 766 } 767 768 /** 769 * clk_unprepare - undo preparation of a clock source 770 * @clk: the clk being unprepared 771 * 772 * clk_unprepare may sleep, which differentiates it from 
clk_disable. In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep. One example is a clk which is accessed over
 * I2C. In the complex case a clk gate operation may require a fast and a slow
 * part. It is for this reason that clk_unprepare and clk_disable are not
 * mutually exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_prepare_lock();
	__clk_unprepare(clk);
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);

int __clk_prepare(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (clk->prepare_count == 0) {
		ret = __clk_prepare(clk->parent);
		if (ret)
			return ret;

		if (clk->ops->prepare) {
			ret = clk->ops->prepare(clk->hw);
			if (ret) {
				__clk_unprepare(clk->parent);
				return ret;
			}
		}
	}

	clk->prepare_count++;

	return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep. One example is a clk which is accessed over I2C. In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive. In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	clk_prepare_lock();
	ret = __clk_prepare(clk);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->enable_count == 0))
		return;

	if (--clk->enable_count > 0)
		return;

	if (clk->ops->disable)
		clk->ops->disable(clk->hw);

	__clk_disable(clk->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep. One example is a
 * SoC-internal clk which is controlled via simple register writes. In the
 * complex case a clk gate operation may require a fast and a slow part. It is
 * for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive. In fact clk_disable must be called before clk_unprepare.
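 *
 * As an illustrative sketch only (not part of this file; "my_clk" is a
 * hypothetical handle obtained from clk_get()), a consumer that follows
 * the required ordering might look like the lines below. The combined
 * helpers clk_prepare_enable()/clk_disable_unprepare() from <linux/clk.h>
 * wrap the same two-step sequence:
 *
 *	ret = clk_prepare(my_clk);	// may sleep, non-atomic context only
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(my_clk);	// atomic-safe, never sleeps
 *	if (ret) {
 *		clk_unprepare(my_clk);
 *		return ret;
 *	}
 *	// ... use the hardware ...
 *	clk_disable(my_clk);		// gate first
 *	clk_unprepare(my_clk);		// then drop the prepare count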
868 */ 869 void clk_disable(struct clk *clk) 870 { 871 unsigned long flags; 872 873 if (IS_ERR_OR_NULL(clk)) 874 return; 875 876 flags = clk_enable_lock(); 877 __clk_disable(clk); 878 clk_enable_unlock(flags); 879 } 880 EXPORT_SYMBOL_GPL(clk_disable); 881 882 static int __clk_enable(struct clk *clk) 883 { 884 int ret = 0; 885 886 if (!clk) 887 return 0; 888 889 if (WARN_ON(clk->prepare_count == 0)) 890 return -ESHUTDOWN; 891 892 if (clk->enable_count == 0) { 893 ret = __clk_enable(clk->parent); 894 895 if (ret) 896 return ret; 897 898 if (clk->ops->enable) { 899 ret = clk->ops->enable(clk->hw); 900 if (ret) { 901 __clk_disable(clk->parent); 902 return ret; 903 } 904 } 905 } 906 907 clk->enable_count++; 908 return 0; 909 } 910 911 /** 912 * clk_enable - ungate a clock 913 * @clk: the clk being ungated 914 * 915 * clk_enable must not sleep, which differentiates it from clk_prepare. In a 916 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk 917 * if the operation will never sleep. One example is a SoC-internal clk which 918 * is controlled via simple register writes. In the complex case a clk ungate 919 * operation may require a fast and a slow part. It is this reason that 920 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare 921 * must be called before clk_enable. Returns 0 on success, -EERROR 922 * otherwise. 923 */ 924 int clk_enable(struct clk *clk) 925 { 926 unsigned long flags; 927 int ret; 928 929 flags = clk_enable_lock(); 930 ret = __clk_enable(clk); 931 clk_enable_unlock(flags); 932 933 return ret; 934 } 935 EXPORT_SYMBOL_GPL(clk_enable); 936 937 /** 938 * __clk_round_rate - round the given rate for a clk 939 * @clk: round the rate of this clock 940 * @rate: the rate which is to be rounded 941 * 942 * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate 943 */ 944 unsigned long __clk_round_rate(struct clk *clk, unsigned long rate) 945 { 946 unsigned long parent_rate = 0; 947 struct clk *parent; 948 struct clk_hw *parent_hw; 949 950 if (!clk) 951 return 0; 952 953 parent = clk->parent; 954 if (parent) 955 parent_rate = parent->rate; 956 957 if (clk->ops->determine_rate) { 958 parent_hw = parent ? parent->hw : NULL; 959 return clk->ops->determine_rate(clk->hw, rate, &parent_rate, 960 &parent_hw); 961 } else if (clk->ops->round_rate) 962 return clk->ops->round_rate(clk->hw, rate, &parent_rate); 963 else if (clk->flags & CLK_SET_RATE_PARENT) 964 return __clk_round_rate(clk->parent, rate); 965 else 966 return clk->rate; 967 } 968 EXPORT_SYMBOL_GPL(__clk_round_rate); 969 970 /** 971 * clk_round_rate - round the given rate for a clk 972 * @clk: the clk for which we are rounding a rate 973 * @rate: the rate which is to be rounded 974 * 975 * Takes in a rate as input and rounds it to a rate that the clk can actually 976 * use which is then returned. If clk doesn't support round_rate operation 977 * then the parent rate is returned. 978 */ 979 long clk_round_rate(struct clk *clk, unsigned long rate) 980 { 981 unsigned long ret; 982 983 clk_prepare_lock(); 984 ret = __clk_round_rate(clk, rate); 985 clk_prepare_unlock(); 986 987 return ret; 988 } 989 EXPORT_SYMBOL_GPL(clk_round_rate); 990 991 /** 992 * __clk_notify - call clk notifier chain 993 * @clk: struct clk * that is changing rate 994 * @msg: clk notifier type (see include/linux/clk.h) 995 * @old_rate: old clk rate 996 * @new_rate: new clk rate 997 * 998 * Triggers a notifier call chain on the clk rate-change notification 999 * for 'clk'. 
Passes a pointer to the struct clk and the previous 1000 * and current rates to the notifier callback. Intended to be called by 1001 * internal clock code only. Returns NOTIFY_DONE from the last driver 1002 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if 1003 * a driver returns that. 1004 */ 1005 static int __clk_notify(struct clk *clk, unsigned long msg, 1006 unsigned long old_rate, unsigned long new_rate) 1007 { 1008 struct clk_notifier *cn; 1009 struct clk_notifier_data cnd; 1010 int ret = NOTIFY_DONE; 1011 1012 cnd.clk = clk; 1013 cnd.old_rate = old_rate; 1014 cnd.new_rate = new_rate; 1015 1016 list_for_each_entry(cn, &clk_notifier_list, node) { 1017 if (cn->clk == clk) { 1018 ret = srcu_notifier_call_chain(&cn->notifier_head, msg, 1019 &cnd); 1020 break; 1021 } 1022 } 1023 1024 return ret; 1025 } 1026 1027 /** 1028 * __clk_recalc_accuracies 1029 * @clk: first clk in the subtree 1030 * 1031 * Walks the subtree of clks starting with clk and recalculates accuracies as 1032 * it goes. Note that if a clk does not implement the .recalc_accuracy 1033 * callback then it is assumed that the clock will take on the accuracy of it's 1034 * parent. 1035 * 1036 * Caller must hold prepare_lock. 1037 */ 1038 static void __clk_recalc_accuracies(struct clk *clk) 1039 { 1040 unsigned long parent_accuracy = 0; 1041 struct clk *child; 1042 1043 if (clk->parent) 1044 parent_accuracy = clk->parent->accuracy; 1045 1046 if (clk->ops->recalc_accuracy) 1047 clk->accuracy = clk->ops->recalc_accuracy(clk->hw, 1048 parent_accuracy); 1049 else 1050 clk->accuracy = parent_accuracy; 1051 1052 hlist_for_each_entry(child, &clk->children, child_node) 1053 __clk_recalc_accuracies(child); 1054 } 1055 1056 /** 1057 * clk_get_accuracy - return the accuracy of clk 1058 * @clk: the clk whose accuracy is being returned 1059 * 1060 * Simply returns the cached accuracy of the clk, unless 1061 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be 1062 * issued. 1063 * If clk is NULL then returns 0. 1064 */ 1065 long clk_get_accuracy(struct clk *clk) 1066 { 1067 unsigned long accuracy; 1068 1069 clk_prepare_lock(); 1070 if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE)) 1071 __clk_recalc_accuracies(clk); 1072 1073 accuracy = __clk_get_accuracy(clk); 1074 clk_prepare_unlock(); 1075 1076 return accuracy; 1077 } 1078 EXPORT_SYMBOL_GPL(clk_get_accuracy); 1079 1080 static unsigned long clk_recalc(struct clk *clk, unsigned long parent_rate) 1081 { 1082 if (clk->ops->recalc_rate) 1083 return clk->ops->recalc_rate(clk->hw, parent_rate); 1084 return parent_rate; 1085 } 1086 1087 /** 1088 * __clk_recalc_rates 1089 * @clk: first clk in the subtree 1090 * @msg: notification type (see include/linux/clk.h) 1091 * 1092 * Walks the subtree of clks starting with clk and recalculates rates as it 1093 * goes. Note that if a clk does not implement the .recalc_rate callback then 1094 * it is assumed that the clock will take on the rate of its parent. 1095 * 1096 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification, 1097 * if necessary. 1098 * 1099 * Caller must hold prepare_lock. 
1100 */ 1101 static void __clk_recalc_rates(struct clk *clk, unsigned long msg) 1102 { 1103 unsigned long old_rate; 1104 unsigned long parent_rate = 0; 1105 struct clk *child; 1106 1107 old_rate = clk->rate; 1108 1109 if (clk->parent) 1110 parent_rate = clk->parent->rate; 1111 1112 clk->rate = clk_recalc(clk, parent_rate); 1113 1114 /* 1115 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE 1116 * & ABORT_RATE_CHANGE notifiers 1117 */ 1118 if (clk->notifier_count && msg) 1119 __clk_notify(clk, msg, old_rate, clk->rate); 1120 1121 hlist_for_each_entry(child, &clk->children, child_node) 1122 __clk_recalc_rates(child, msg); 1123 } 1124 1125 /** 1126 * clk_get_rate - return the rate of clk 1127 * @clk: the clk whose rate is being returned 1128 * 1129 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag 1130 * is set, which means a recalc_rate will be issued. 1131 * If clk is NULL then returns 0. 1132 */ 1133 unsigned long clk_get_rate(struct clk *clk) 1134 { 1135 unsigned long rate; 1136 1137 clk_prepare_lock(); 1138 1139 if (clk && (clk->flags & CLK_GET_RATE_NOCACHE)) 1140 __clk_recalc_rates(clk, 0); 1141 1142 rate = __clk_get_rate(clk); 1143 clk_prepare_unlock(); 1144 1145 return rate; 1146 } 1147 EXPORT_SYMBOL_GPL(clk_get_rate); 1148 1149 static int clk_fetch_parent_index(struct clk *clk, struct clk *parent) 1150 { 1151 int i; 1152 1153 if (!clk->parents) { 1154 clk->parents = kcalloc(clk->num_parents, 1155 sizeof(struct clk *), GFP_KERNEL); 1156 if (!clk->parents) 1157 return -ENOMEM; 1158 } 1159 1160 /* 1161 * find index of new parent clock using cached parent ptrs, 1162 * or if not yet cached, use string name comparison and cache 1163 * them now to avoid future calls to __clk_lookup. 1164 */ 1165 for (i = 0; i < clk->num_parents; i++) { 1166 if (clk->parents[i] == parent) 1167 return i; 1168 1169 if (clk->parents[i]) 1170 continue; 1171 1172 if (!strcmp(clk->parent_names[i], parent->name)) { 1173 clk->parents[i] = __clk_lookup(parent->name); 1174 return i; 1175 } 1176 } 1177 1178 return -EINVAL; 1179 } 1180 1181 static void clk_reparent(struct clk *clk, struct clk *new_parent) 1182 { 1183 hlist_del(&clk->child_node); 1184 1185 if (new_parent) { 1186 /* avoid duplicate POST_RATE_CHANGE notifications */ 1187 if (new_parent->new_child == clk) 1188 new_parent->new_child = NULL; 1189 1190 hlist_add_head(&clk->child_node, &new_parent->children); 1191 } else { 1192 hlist_add_head(&clk->child_node, &clk_orphan_list); 1193 } 1194 1195 clk->parent = new_parent; 1196 } 1197 1198 static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent) 1199 { 1200 unsigned long flags; 1201 struct clk *old_parent = clk->parent; 1202 1203 /* 1204 * Migrate prepare state between parents and prevent race with 1205 * clk_enable(). 1206 * 1207 * If the clock is not prepared, then a race with 1208 * clk_enable/disable() is impossible since we already have the 1209 * prepare lock (future calls to clk_enable() need to be preceded by 1210 * a clk_prepare()). 1211 * 1212 * If the clock is prepared, migrate the prepared state to the new 1213 * parent and also protect against a race with clk_enable() by 1214 * forcing the clock and the new parent on. This ensures that all 1215 * future calls to clk_enable() are practically NOPs with respect to 1216 * hardware and software states. 1217 * 1218 * See also: Comment for clk_set_parent() below. 
1219 */ 1220 if (clk->prepare_count) { 1221 __clk_prepare(parent); 1222 clk_enable(parent); 1223 clk_enable(clk); 1224 } 1225 1226 /* update the clk tree topology */ 1227 flags = clk_enable_lock(); 1228 clk_reparent(clk, parent); 1229 clk_enable_unlock(flags); 1230 1231 return old_parent; 1232 } 1233 1234 static void __clk_set_parent_after(struct clk *clk, struct clk *parent, 1235 struct clk *old_parent) 1236 { 1237 /* 1238 * Finish the migration of prepare state and undo the changes done 1239 * for preventing a race with clk_enable(). 1240 */ 1241 if (clk->prepare_count) { 1242 clk_disable(clk); 1243 clk_disable(old_parent); 1244 __clk_unprepare(old_parent); 1245 } 1246 } 1247 1248 static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index) 1249 { 1250 unsigned long flags; 1251 int ret = 0; 1252 struct clk *old_parent; 1253 1254 old_parent = __clk_set_parent_before(clk, parent); 1255 1256 /* change clock input source */ 1257 if (parent && clk->ops->set_parent) 1258 ret = clk->ops->set_parent(clk->hw, p_index); 1259 1260 if (ret) { 1261 flags = clk_enable_lock(); 1262 clk_reparent(clk, old_parent); 1263 clk_enable_unlock(flags); 1264 1265 if (clk->prepare_count) { 1266 clk_disable(clk); 1267 clk_disable(parent); 1268 __clk_unprepare(parent); 1269 } 1270 return ret; 1271 } 1272 1273 __clk_set_parent_after(clk, parent, old_parent); 1274 1275 return 0; 1276 } 1277 1278 /** 1279 * __clk_speculate_rates 1280 * @clk: first clk in the subtree 1281 * @parent_rate: the "future" rate of clk's parent 1282 * 1283 * Walks the subtree of clks starting with clk, speculating rates as it 1284 * goes and firing off PRE_RATE_CHANGE notifications as necessary. 1285 * 1286 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending 1287 * pre-rate change notifications and returns early if no clks in the 1288 * subtree have subscribed to the notifications. Note that if a clk does not 1289 * implement the .recalc_rate callback then it is assumed that the clock will 1290 * take on the rate of its parent. 1291 * 1292 * Caller must hold prepare_lock. 
1293 */ 1294 static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate) 1295 { 1296 struct clk *child; 1297 unsigned long new_rate; 1298 int ret = NOTIFY_DONE; 1299 1300 new_rate = clk_recalc(clk, parent_rate); 1301 1302 /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */ 1303 if (clk->notifier_count) 1304 ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate); 1305 1306 if (ret & NOTIFY_STOP_MASK) { 1307 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n", 1308 __func__, clk->name, ret); 1309 goto out; 1310 } 1311 1312 hlist_for_each_entry(child, &clk->children, child_node) { 1313 ret = __clk_speculate_rates(child, new_rate); 1314 if (ret & NOTIFY_STOP_MASK) 1315 break; 1316 } 1317 1318 out: 1319 return ret; 1320 } 1321 1322 static void clk_calc_subtree(struct clk *clk, unsigned long new_rate, 1323 struct clk *new_parent, u8 p_index) 1324 { 1325 struct clk *child; 1326 1327 clk->new_rate = new_rate; 1328 clk->new_parent = new_parent; 1329 clk->new_parent_index = p_index; 1330 /* include clk in new parent's PRE_RATE_CHANGE notifications */ 1331 clk->new_child = NULL; 1332 if (new_parent && new_parent != clk->parent) 1333 new_parent->new_child = clk; 1334 1335 hlist_for_each_entry(child, &clk->children, child_node) { 1336 child->new_rate = clk_recalc(child, new_rate); 1337 clk_calc_subtree(child, child->new_rate, NULL, 0); 1338 } 1339 } 1340 1341 /* 1342 * calculate the new rates returning the topmost clock that has to be 1343 * changed. 1344 */ 1345 static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate) 1346 { 1347 struct clk *top = clk; 1348 struct clk *old_parent, *parent; 1349 struct clk_hw *parent_hw; 1350 unsigned long best_parent_rate = 0; 1351 unsigned long new_rate; 1352 int p_index = 0; 1353 1354 /* sanity */ 1355 if (IS_ERR_OR_NULL(clk)) 1356 return NULL; 1357 1358 /* save parent rate, if it exists */ 1359 parent = old_parent = clk->parent; 1360 if (parent) 1361 best_parent_rate = parent->rate; 1362 1363 /* find the closest rate and parent clk/rate */ 1364 if (clk->ops->determine_rate) { 1365 parent_hw = parent ? parent->hw : NULL; 1366 new_rate = clk->ops->determine_rate(clk->hw, rate, 1367 &best_parent_rate, 1368 &parent_hw); 1369 parent = parent_hw ? 
parent_hw->clk : NULL; 1370 } else if (clk->ops->round_rate) { 1371 new_rate = clk->ops->round_rate(clk->hw, rate, 1372 &best_parent_rate); 1373 } else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) { 1374 /* pass-through clock without adjustable parent */ 1375 clk->new_rate = clk->rate; 1376 return NULL; 1377 } else { 1378 /* pass-through clock with adjustable parent */ 1379 top = clk_calc_new_rates(parent, rate); 1380 new_rate = parent->new_rate; 1381 goto out; 1382 } 1383 1384 /* some clocks must be gated to change parent */ 1385 if (parent != old_parent && 1386 (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) { 1387 pr_debug("%s: %s not gated but wants to reparent\n", 1388 __func__, clk->name); 1389 return NULL; 1390 } 1391 1392 /* try finding the new parent index */ 1393 if (parent) { 1394 p_index = clk_fetch_parent_index(clk, parent); 1395 if (p_index < 0) { 1396 pr_debug("%s: clk %s can not be parent of clk %s\n", 1397 __func__, parent->name, clk->name); 1398 return NULL; 1399 } 1400 } 1401 1402 if ((clk->flags & CLK_SET_RATE_PARENT) && parent && 1403 best_parent_rate != parent->rate) 1404 top = clk_calc_new_rates(parent, best_parent_rate); 1405 1406 out: 1407 clk_calc_subtree(clk, new_rate, parent, p_index); 1408 1409 return top; 1410 } 1411 1412 /* 1413 * Notify about rate changes in a subtree. Always walk down the whole tree 1414 * so that in case of an error we can walk down the whole tree again and 1415 * abort the change. 1416 */ 1417 static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event) 1418 { 1419 struct clk *child, *tmp_clk, *fail_clk = NULL; 1420 int ret = NOTIFY_DONE; 1421 1422 if (clk->rate == clk->new_rate) 1423 return NULL; 1424 1425 if (clk->notifier_count) { 1426 ret = __clk_notify(clk, event, clk->rate, clk->new_rate); 1427 if (ret & NOTIFY_STOP_MASK) 1428 fail_clk = clk; 1429 } 1430 1431 hlist_for_each_entry(child, &clk->children, child_node) { 1432 /* Skip children who will be reparented to another clock */ 1433 if (child->new_parent && child->new_parent != clk) 1434 continue; 1435 tmp_clk = clk_propagate_rate_change(child, event); 1436 if (tmp_clk) 1437 fail_clk = tmp_clk; 1438 } 1439 1440 /* handle the new child who might not be in clk->children yet */ 1441 if (clk->new_child) { 1442 tmp_clk = clk_propagate_rate_change(clk->new_child, event); 1443 if (tmp_clk) 1444 fail_clk = tmp_clk; 1445 } 1446 1447 return fail_clk; 1448 } 1449 1450 /* 1451 * walk down a subtree and set the new rates notifying the rate 1452 * change on the way 1453 */ 1454 static void clk_change_rate(struct clk *clk) 1455 { 1456 struct clk *child; 1457 struct hlist_node *tmp; 1458 unsigned long old_rate; 1459 unsigned long best_parent_rate = 0; 1460 bool skip_set_rate = false; 1461 struct clk *old_parent; 1462 1463 old_rate = clk->rate; 1464 1465 if (clk->new_parent) 1466 best_parent_rate = clk->new_parent->rate; 1467 else if (clk->parent) 1468 best_parent_rate = clk->parent->rate; 1469 1470 if (clk->new_parent && clk->new_parent != clk->parent) { 1471 old_parent = __clk_set_parent_before(clk, clk->new_parent); 1472 1473 if (clk->ops->set_rate_and_parent) { 1474 skip_set_rate = true; 1475 clk->ops->set_rate_and_parent(clk->hw, clk->new_rate, 1476 best_parent_rate, 1477 clk->new_parent_index); 1478 } else if (clk->ops->set_parent) { 1479 clk->ops->set_parent(clk->hw, clk->new_parent_index); 1480 } 1481 1482 __clk_set_parent_after(clk, clk->new_parent, old_parent); 1483 } 1484 1485 if (!skip_set_rate && clk->ops->set_rate) 1486 
clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate); 1487 1488 clk->rate = clk_recalc(clk, best_parent_rate); 1489 1490 if (clk->notifier_count && old_rate != clk->rate) 1491 __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate); 1492 1493 /* 1494 * Use safe iteration, as change_rate can actually swap parents 1495 * for certain clock types. 1496 */ 1497 hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) { 1498 /* Skip children who will be reparented to another clock */ 1499 if (child->new_parent && child->new_parent != clk) 1500 continue; 1501 clk_change_rate(child); 1502 } 1503 1504 /* handle the new child who might not be in clk->children yet */ 1505 if (clk->new_child) 1506 clk_change_rate(clk->new_child); 1507 } 1508 1509 /** 1510 * clk_set_rate - specify a new rate for clk 1511 * @clk: the clk whose rate is being changed 1512 * @rate: the new rate for clk 1513 * 1514 * In the simplest case clk_set_rate will only adjust the rate of clk. 1515 * 1516 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to 1517 * propagate up to clk's parent; whether or not this happens depends on the 1518 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged 1519 * after calling .round_rate then upstream parent propagation is ignored. If 1520 * *parent_rate comes back with a new rate for clk's parent then we propagate 1521 * up to clk's parent and set its rate. Upward propagation will continue 1522 * until either a clk does not support the CLK_SET_RATE_PARENT flag or 1523 * .round_rate stops requesting changes to clk's parent_rate. 1524 * 1525 * Rate changes are accomplished via tree traversal that also recalculates the 1526 * rates for the clocks and fires off POST_RATE_CHANGE notifiers. 1527 * 1528 * Returns 0 on success, -EERROR otherwise. 1529 */ 1530 int clk_set_rate(struct clk *clk, unsigned long rate) 1531 { 1532 struct clk *top, *fail_clk; 1533 int ret = 0; 1534 1535 if (!clk) 1536 return 0; 1537 1538 /* prevent racing with updates to the clock topology */ 1539 clk_prepare_lock(); 1540 1541 /* bail early if nothing to do */ 1542 if (rate == clk_get_rate(clk)) 1543 goto out; 1544 1545 if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) { 1546 ret = -EBUSY; 1547 goto out; 1548 } 1549 1550 /* calculate new rates and get the topmost changed clock */ 1551 top = clk_calc_new_rates(clk, rate); 1552 if (!top) { 1553 ret = -EINVAL; 1554 goto out; 1555 } 1556 1557 /* notify that we are about to change rates */ 1558 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE); 1559 if (fail_clk) { 1560 pr_debug("%s: failed to set %s rate\n", __func__, 1561 fail_clk->name); 1562 clk_propagate_rate_change(top, ABORT_RATE_CHANGE); 1563 ret = -EBUSY; 1564 goto out; 1565 } 1566 1567 /* change the rates */ 1568 clk_change_rate(top); 1569 1570 out: 1571 clk_prepare_unlock(); 1572 1573 return ret; 1574 } 1575 EXPORT_SYMBOL_GPL(clk_set_rate); 1576 1577 /** 1578 * clk_get_parent - return the parent of a clk 1579 * @clk: the clk whose parent gets returned 1580 * 1581 * Simply returns clk->parent. Returns NULL if clk is NULL. 1582 */ 1583 struct clk *clk_get_parent(struct clk *clk) 1584 { 1585 struct clk *parent; 1586 1587 clk_prepare_lock(); 1588 parent = __clk_get_parent(clk); 1589 clk_prepare_unlock(); 1590 1591 return parent; 1592 } 1593 EXPORT_SYMBOL_GPL(clk_get_parent); 1594 1595 /* 1596 * .get_parent is mandatory for clocks with multiple possible parents. It is 1597 * optional for single-parent clocks. 
Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal. If .parents does not exist then walk the tree with __clk_lookup.
 */
static struct clk *__clk_init_parent(struct clk *clk)
{
	struct clk *ret = NULL;
	u8 index;

	/* handle the trivial cases */

	if (!clk->num_parents)
		goto out;

	if (clk->num_parents == 1) {
		if (IS_ERR_OR_NULL(clk->parent))
			clk->parent = __clk_lookup(clk->parent_names[0]);
		ret = clk->parent;
		goto out;
	}

	if (!clk->ops->get_parent) {
		WARN(!clk->ops->get_parent,
			"%s: multi-parent clocks must implement .get_parent\n",
			__func__);
		goto out;
	}

	/*
	 * Do our best to cache parent clocks in clk->parents. This prevents
	 * unnecessary and expensive calls to __clk_lookup. We don't set
	 * clk->parent here; that is done by the calling function.
	 */

	index = clk->ops->get_parent(clk->hw);

	if (!clk->parents)
		clk->parents =
			kcalloc(clk->num_parents, sizeof(struct clk *),
					GFP_KERNEL);

	ret = clk_get_parent_by_index(clk, index);

out:
	return ret;
}

void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
	clk_reparent(clk, new_parent);
	__clk_recalc_accuracies(clk);
	__clk_recalc_rates(clk, POST_RATE_CHANGE);
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source. If clk is in
 * prepared state, the clk will get enabled for the duration of this call. If
 * that's not acceptable for a specific clk (e.g. the consumer can't handle
 * that, the reparenting is glitchy in hardware, etc), use the
 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
 *
 * After successfully changing clk's parent, clk_set_parent will update the
 * clk topology, sysfs topology and propagate rate recalculation via
 * __clk_recalc_rates.
 *
 * Returns 0 on success, -EERROR otherwise.
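 *
 * A minimal consumer-side sketch (illustrative only; the device pointer
 * and clock names are hypothetical, not taken from this file):
 *
 *	struct clk *mux = devm_clk_get(dev, "uart_mux");
 *	struct clk *pll = devm_clk_get(dev, "pll2");
 *
 *	if (!IS_ERR(mux) && !IS_ERR(pll))
 *		ret = clk_set_parent(mux, pll);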
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	if (!clk)
		return 0;

	/* verify ops for multi-parent clks */
	if ((clk->num_parents > 1) && (!clk->ops->set_parent))
		return -ENOSYS;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (clk->parent == parent)
		goto out;

	/* check that we are allowed to re-parent if the clock is in use */
	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(clk, parent);
		p_rate = parent->rate;
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
					__func__, parent->name, clk->name);
			ret = p_index;
			goto out;
		}
	}

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(clk, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto out;

	/* do the re-parent */
	ret = __clk_set_parent(clk, parent, p_index);

	/* propagate rate and accuracy recalculation accordingly */
	if (ret) {
		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
	} else {
		__clk_recalc_rates(clk, POST_RATE_CHANGE);
		__clk_recalc_accuracies(clk);
	}

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified
 * degrees. Returns 0 on success, -EERROR otherwise.
 *
 * This function makes no distinction about the input or reference
 * signal that we adjust the clock signal phase against. For example, with
 * phase-locked loop clock signal generators we may shift phase with
 * respect to the feedback clock signal input, but for other cases the
 * clock phase may be shifted with respect to some other, unspecified
 * signal.
 *
 * Additionally, the concept of phase shift does not propagate through
 * the clock tree hierarchy, which sets it apart from clock rates and
 * clock accuracy. A parent clock phase attribute does not have an
 * impact on the phase attribute of a child clock.
 */
int clk_set_phase(struct clk *clk, int degrees)
{
	int ret = 0;

	if (!clk)
		goto out;

	/* sanity check degrees */
	degrees %= 360;
	if (degrees < 0)
		degrees += 360;

	clk_prepare_lock();

	if (!clk->ops->set_phase)
		goto out_unlock;

	ret = clk->ops->set_phase(clk->hw, degrees);

	if (!ret)
		clk->phase = degrees;

out_unlock:
	clk_prepare_unlock();

out:
	return ret;
}

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * -EERROR.
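 *
 * Illustrative sketch only ("sample_clk" is a hypothetical handle, not
 * taken from this file):
 *
 *	if (clk_set_phase(sample_clk, 90) == 0)
 *		phase = clk_get_phase(sample_clk);	// expected to be 90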
1788 */ 1789 int clk_get_phase(struct clk *clk) 1790 { 1791 int ret = 0; 1792 1793 if (!clk) 1794 goto out; 1795 1796 clk_prepare_lock(); 1797 ret = clk->phase; 1798 clk_prepare_unlock(); 1799 1800 out: 1801 return ret; 1802 } 1803 1804 /** 1805 * __clk_init - initialize the data structures in a struct clk 1806 * @dev: device initializing this clk, placeholder for now 1807 * @clk: clk being initialized 1808 * 1809 * Initializes the lists in struct clk, queries the hardware for the 1810 * parent and rate and sets them both. 1811 */ 1812 int __clk_init(struct device *dev, struct clk *clk) 1813 { 1814 int i, ret = 0; 1815 struct clk *orphan; 1816 struct hlist_node *tmp2; 1817 1818 if (!clk) 1819 return -EINVAL; 1820 1821 clk_prepare_lock(); 1822 1823 /* check to see if a clock with this name is already registered */ 1824 if (__clk_lookup(clk->name)) { 1825 pr_debug("%s: clk %s already initialized\n", 1826 __func__, clk->name); 1827 ret = -EEXIST; 1828 goto out; 1829 } 1830 1831 /* check that clk_ops are sane. See Documentation/clk.txt */ 1832 if (clk->ops->set_rate && 1833 !((clk->ops->round_rate || clk->ops->determine_rate) && 1834 clk->ops->recalc_rate)) { 1835 pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n", 1836 __func__, clk->name); 1837 ret = -EINVAL; 1838 goto out; 1839 } 1840 1841 if (clk->ops->set_parent && !clk->ops->get_parent) { 1842 pr_warning("%s: %s must implement .get_parent & .set_parent\n", 1843 __func__, clk->name); 1844 ret = -EINVAL; 1845 goto out; 1846 } 1847 1848 if (clk->ops->set_rate_and_parent && 1849 !(clk->ops->set_parent && clk->ops->set_rate)) { 1850 pr_warn("%s: %s must implement .set_parent & .set_rate\n", 1851 __func__, clk->name); 1852 ret = -EINVAL; 1853 goto out; 1854 } 1855 1856 /* throw a WARN if any entries in parent_names are NULL */ 1857 for (i = 0; i < clk->num_parents; i++) 1858 WARN(!clk->parent_names[i], 1859 "%s: invalid NULL in %s's .parent_names\n", 1860 __func__, clk->name); 1861 1862 /* 1863 * Allocate an array of struct clk *'s to avoid unnecessary string 1864 * look-ups of clk's possible parents. This can fail for clocks passed 1865 * in to clk_init during early boot; thus any access to clk->parents[] 1866 * must always check for a NULL pointer and try to populate it if 1867 * necessary. 1868 * 1869 * If clk->parents is not NULL we skip this entire block. This allows 1870 * for clock drivers to statically initialize clk->parents. 1871 */ 1872 if (clk->num_parents > 1 && !clk->parents) { 1873 clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *), 1874 GFP_KERNEL); 1875 /* 1876 * __clk_lookup returns NULL for parents that have not been 1877 * clk_init'd; thus any access to clk->parents[] must check 1878 * for a NULL pointer. We can always perform lazy lookups for 1879 * missing parents later on. 1880 */ 1881 if (clk->parents) 1882 for (i = 0; i < clk->num_parents; i++) 1883 clk->parents[i] = 1884 __clk_lookup(clk->parent_names[i]); 1885 } 1886 1887 clk->parent = __clk_init_parent(clk); 1888 1889 /* 1890 * Populate clk->parent if parent has already been __clk_init'd. If 1891 * parent has not yet been __clk_init'd then place clk in the orphan 1892 * list. If clk has set the CLK_IS_ROOT flag then place it in the root 1893 * clk list. 1894 * 1895 * Every time a new clk is clk_init'd then we walk the list of orphan 1896 * clocks and re-parent any that are children of the clock currently 1897 * being clk_init'd. 
1898 */ 1899 if (clk->parent) 1900 hlist_add_head(&clk->child_node, 1901 &clk->parent->children); 1902 else if (clk->flags & CLK_IS_ROOT) 1903 hlist_add_head(&clk->child_node, &clk_root_list); 1904 else 1905 hlist_add_head(&clk->child_node, &clk_orphan_list); 1906 1907 /* 1908 * Set clk's accuracy. The preferred method is to use 1909 * .recalc_accuracy. For simple clocks and lazy developers the default 1910 * fallback is to use the parent's accuracy. If a clock doesn't have a 1911 * parent (or is orphaned) then accuracy is set to zero (perfect 1912 * clock). 1913 */ 1914 if (clk->ops->recalc_accuracy) 1915 clk->accuracy = clk->ops->recalc_accuracy(clk->hw, 1916 __clk_get_accuracy(clk->parent)); 1917 else if (clk->parent) 1918 clk->accuracy = clk->parent->accuracy; 1919 else 1920 clk->accuracy = 0; 1921 1922 /* 1923 * Set clk's phase. 1924 * Since a phase is by definition relative to its parent, just 1925 * query the current clock phase, or just assume it's in phase. 1926 */ 1927 if (clk->ops->get_phase) 1928 clk->phase = clk->ops->get_phase(clk->hw); 1929 else 1930 clk->phase = 0; 1931 1932 /* 1933 * Set clk's rate. The preferred method is to use .recalc_rate. For 1934 * simple clocks and lazy developers the default fallback is to use the 1935 * parent's rate. If a clock doesn't have a parent (or is orphaned) 1936 * then rate is set to zero. 1937 */ 1938 if (clk->ops->recalc_rate) 1939 clk->rate = clk->ops->recalc_rate(clk->hw, 1940 __clk_get_rate(clk->parent)); 1941 else if (clk->parent) 1942 clk->rate = clk->parent->rate; 1943 else 1944 clk->rate = 0; 1945 1946 /* 1947 * walk the list of orphan clocks and reparent any that are children of 1948 * this clock 1949 */ 1950 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { 1951 if (orphan->num_parents && orphan->ops->get_parent) { 1952 i = orphan->ops->get_parent(orphan->hw); 1953 if (!strcmp(clk->name, orphan->parent_names[i])) 1954 __clk_reparent(orphan, clk); 1955 continue; 1956 } 1957 1958 for (i = 0; i < orphan->num_parents; i++) 1959 if (!strcmp(clk->name, orphan->parent_names[i])) { 1960 __clk_reparent(orphan, clk); 1961 break; 1962 } 1963 } 1964 1965 /* 1966 * optional platform-specific magic 1967 * 1968 * The .init callback is not used by any of the basic clock types, but 1969 * exists for weird hardware that must perform initialization magic. 1970 * Please consider other ways of solving initialization problems before 1971 * using this callback, as its use is discouraged. 1972 */ 1973 if (clk->ops->init) 1974 clk->ops->init(clk->hw); 1975 1976 kref_init(&clk->ref); 1977 out: 1978 clk_prepare_unlock(); 1979 1980 if (!ret) 1981 clk_debug_register(clk); 1982 1983 return ret; 1984 } 1985 1986 /** 1987 * __clk_register - register a clock and return a cookie. 1988 * 1989 * Same as clk_register, except that the .clk field inside hw shall point to a 1990 * preallocated (generally statically allocated) struct clk. None of the fields 1991 * of the struct clk need to be initialized. 1992 * 1993 * The data pointed to by .init and .clk field shall NOT be marked as init 1994 * data. 1995 * 1996 * __clk_register is only exposed via clk-private.h and is intended for use with 1997 * very large numbers of clocks that need to be statically initialized. It is 1998 * a layering violation to include clk-private.h from any code which implements 1999 * a clock's .ops; as such any statically initialized clock data MUST be in a 2000 * separate C file from the logic that implements its operations. 
Returns 0 2001 * on success, otherwise an error code. 2002 */ 2003 struct clk *__clk_register(struct device *dev, struct clk_hw *hw) 2004 { 2005 int ret; 2006 struct clk *clk; 2007 2008 clk = hw->clk; 2009 clk->name = hw->init->name; 2010 clk->ops = hw->init->ops; 2011 clk->hw = hw; 2012 clk->flags = hw->init->flags; 2013 clk->parent_names = hw->init->parent_names; 2014 clk->num_parents = hw->init->num_parents; 2015 if (dev && dev->driver) 2016 clk->owner = dev->driver->owner; 2017 else 2018 clk->owner = NULL; 2019 2020 ret = __clk_init(dev, clk); 2021 if (ret) 2022 return ERR_PTR(ret); 2023 2024 return clk; 2025 } 2026 EXPORT_SYMBOL_GPL(__clk_register); 2027 2028 /** 2029 * clk_register - allocate a new clock, register it and return an opaque cookie 2030 * @dev: device that is registering this clock 2031 * @hw: link to hardware-specific clock data 2032 * 2033 * clk_register is the primary interface for populating the clock tree with new 2034 * clock nodes. It returns a pointer to the newly allocated struct clk which 2035 * cannot be dereferenced by driver code but may be used in conjuction with the 2036 * rest of the clock API. In the event of an error clk_register will return an 2037 * error code; drivers must test for an error code after calling clk_register. 2038 */ 2039 struct clk *clk_register(struct device *dev, struct clk_hw *hw) 2040 { 2041 int i, ret; 2042 struct clk *clk; 2043 2044 clk = kzalloc(sizeof(*clk), GFP_KERNEL); 2045 if (!clk) { 2046 pr_err("%s: could not allocate clk\n", __func__); 2047 ret = -ENOMEM; 2048 goto fail_out; 2049 } 2050 2051 clk->name = kstrdup(hw->init->name, GFP_KERNEL); 2052 if (!clk->name) { 2053 pr_err("%s: could not allocate clk->name\n", __func__); 2054 ret = -ENOMEM; 2055 goto fail_name; 2056 } 2057 clk->ops = hw->init->ops; 2058 if (dev && dev->driver) 2059 clk->owner = dev->driver->owner; 2060 clk->hw = hw; 2061 clk->flags = hw->init->flags; 2062 clk->num_parents = hw->init->num_parents; 2063 hw->clk = clk; 2064 2065 /* allocate local copy in case parent_names is __initdata */ 2066 clk->parent_names = kcalloc(clk->num_parents, sizeof(char *), 2067 GFP_KERNEL); 2068 2069 if (!clk->parent_names) { 2070 pr_err("%s: could not allocate clk->parent_names\n", __func__); 2071 ret = -ENOMEM; 2072 goto fail_parent_names; 2073 } 2074 2075 2076 /* copy each string name in case parent_names is __initdata */ 2077 for (i = 0; i < clk->num_parents; i++) { 2078 clk->parent_names[i] = kstrdup(hw->init->parent_names[i], 2079 GFP_KERNEL); 2080 if (!clk->parent_names[i]) { 2081 pr_err("%s: could not copy parent_names\n", __func__); 2082 ret = -ENOMEM; 2083 goto fail_parent_names_copy; 2084 } 2085 } 2086 2087 ret = __clk_init(dev, clk); 2088 if (!ret) 2089 return clk; 2090 2091 fail_parent_names_copy: 2092 while (--i >= 0) 2093 kfree(clk->parent_names[i]); 2094 kfree(clk->parent_names); 2095 fail_parent_names: 2096 kfree(clk->name); 2097 fail_name: 2098 kfree(clk); 2099 fail_out: 2100 return ERR_PTR(ret); 2101 } 2102 EXPORT_SYMBOL_GPL(clk_register); 2103 2104 /* 2105 * Free memory allocated for a clock. 2106 * Caller must hold prepare_lock. 2107 */ 2108 static void __clk_release(struct kref *ref) 2109 { 2110 struct clk *clk = container_of(ref, struct clk, ref); 2111 int i = clk->num_parents; 2112 2113 kfree(clk->parents); 2114 while (--i >= 0) 2115 kfree(clk->parent_names[i]); 2116 2117 kfree(clk->parent_names); 2118 kfree(clk->name); 2119 kfree(clk); 2120 } 2121 2122 /* 2123 * Empty clk_ops for unregistered clocks. 
/*
 * Free memory allocated for a clock.
 * Caller must hold prepare_lock.
 */
static void __clk_release(struct kref *ref)
{
	struct clk *clk = container_of(ref, struct clk, ref);
	int i = clk->num_parents;

	kfree(clk->parents);
	while (--i >= 0)
		kfree(clk->parent_names[i]);

	kfree(clk->parent_names);
	kfree(clk->name);
	kfree(clk);
}

/*
 * Empty clk_ops for unregistered clocks. These are used temporarily
 * after clk_unregister() was called on a clock and until the last clock
 * consumer calls clk_put() and the struct clk object is freed.
 */
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
	return -ENXIO;
}

static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
	WARN_ON_ONCE(1);
}

static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long parent_rate)
{
	return -ENXIO;
}

static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
	return -ENXIO;
}

static const struct clk_ops clk_nodrv_ops = {
	.enable		= clk_nodrv_prepare_enable,
	.disable	= clk_nodrv_disable_unprepare,
	.prepare	= clk_nodrv_prepare_enable,
	.unprepare	= clk_nodrv_disable_unprepare,
	.set_rate	= clk_nodrv_set_rate,
	.set_parent	= clk_nodrv_set_parent,
};

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 */
void clk_unregister(struct clk *clk)
{
	unsigned long flags;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_debug_unregister(clk);

	clk_prepare_lock();

	if (clk->ops == &clk_nodrv_ops) {
		pr_err("%s: unregistered clock: %s\n", __func__, clk->name);
		goto unlock;
	}
	/*
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
	flags = clk_enable_lock();
	clk->ops = &clk_nodrv_ops;
	clk_enable_unlock(flags);

	if (!hlist_empty(&clk->children)) {
		struct clk *child;
		struct hlist_node *t;

		/* Reparent all children to the orphan list. */
		hlist_for_each_entry_safe(child, t, &clk->children, child_node)
			clk_set_parent(child, NULL);
	}

	hlist_del_init(&clk->child_node);

	if (clk->prepare_count)
		pr_warn("%s: unregistering prepared clock: %s\n",
					__func__, clk->name);
	kref_put(&clk->ref, __clk_release);

unlock:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);

static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(*(struct clk **)res);
}

/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register(). Clocks returned from this function are
 * automatically clk_unregister()ed on driver detach. See clk_register() for
 * more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;
	struct clk **clkp;

	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
	if (!clkp)
		return ERR_PTR(-ENOMEM);

	clk = clk_register(dev, hw);
	if (!IS_ERR(clk)) {
		*clkp = clk;
		devres_add(dev, clkp);
	} else {
		devres_free(clkp);
	}

	return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);
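/*
 * Usage sketch (hypothetical driver code, for illustration only): the managed
 * variant removes the need for an explicit clk_unregister() in the driver's
 * remove path. "foo_hw" is the made-up clk_hw from the earlier example.
 *
 *	clk = devm_clk_register(&pdev->dev, &foo_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	// no clk_unregister() needed; it happens automatically on detach
 */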
static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk **c = res;

	if (WARN_ON(!c || !*c))
		return 0;
	return *c == data;
}

/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device that registered the clock
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);

/*
 * clkdev helpers
 */
int __clk_get(struct clk *clk)
{
	if (clk) {
		if (!try_module_get(clk->owner))
			return 0;

		kref_get(&clk->ref);
	}
	return 1;
}

void __clk_put(struct clk *clk)
{
	struct module *owner;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();
	owner = clk->owner;
	kref_put(&clk->ref, __clk_release);
	clk_prepare_unlock();

	module_put(owner);
}

/*** clk rate change notifiers ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes. This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon. The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * this would cause a nested acquisition of the prepare_lock mutex.
 *
 * In all notification cases (pre, post and abort rate change) the
 * original clock rate is passed to the callback via struct
 * clk_notifier_data.old_rate and the new frequency is passed via struct
 * clk_notifier_data.new_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->notifier_count++;

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
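/*
 * Usage sketch (hypothetical consumer code, not part of this file): a
 * notifier callback receives a struct clk_notifier_data * as its data
 * argument and can veto a rate change by returning NOTIFY_BAD from the
 * PRE_RATE_CHANGE event. The "foo" names and the rate limit are made up.
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		switch (event) {
 *		case PRE_RATE_CHANGE:
 *			if (cnd->new_rate > 100000000)	// made-up limit
 *				return NOTIFY_BAD;
 *			return NOTIFY_OK;
 *		case POST_RATE_CHANGE:
 *		case ABORT_RATE_CHANGE:
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_clk_notify,
 *	};
 *	...
 *	clk_notifier_register(clk, &foo_nb);
 */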
/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and free memory
 * allocated in clk_notifier_register().
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			list_del(&cn->node);
			kfree(cn);
		}

	} else {
		ret = -ENOENT;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);

#ifdef CONFIG_OF
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback. Returns NULL or a struct clk for the
 *       given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	void *data;
};

static const struct of_device_id __clk_of_table_sentinel
	__used __section(__clk_of_table_end);

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);

/* of_clk_provider list locking helpers */
void of_clk_lock(void)
{
	mutex_lock(&of_clk_mutex);
}

void of_clk_unlock(void)
{
	mutex_unlock(&of_clk_mutex);
}

struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
					void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
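/*
 * Usage sketch (hypothetical provider code, for illustration only): exposing
 * an array of clocks to the device tree through the onecell helper above.
 * "foo_clks" and "FOO_NR_CLKS" are made-up names.
 *
 *	static struct clk *foo_clks[FOO_NR_CLKS];
 *	static struct clk_onecell_data foo_clk_data = {
 *		.clks = foo_clks,
 *		.clk_num = ARRAY_SIZE(foo_clks),
 *	};
 *	...
 *	of_clk_add_provider(np, of_clk_src_onecell_get, &foo_clk_data);
 *
 * A single-output provider would instead pass of_clk_src_simple_get and the
 * struct clk * itself as the data pointer.
 */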
/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback
 *
 * Returns 0 on success or a negative errno on failure.
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clock from %s\n", np->full_name);

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);

/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	struct of_clk_provider *provider;
	struct clk *clk = ERR_PTR(-EPROBE_DEFER);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np)
			clk = provider->get(clkspec, provider->data);
		if (!IS_ERR(clk))
			break;
	}

	return clk;
}

struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	struct clk *clk;

	mutex_lock(&of_clk_mutex);
	clk = __of_clk_get_from_provider(clkspec);
	mutex_unlock(&of_clk_mutex);

	return clk;
}

int of_clk_get_parent_count(struct device_node *np)
{
	return of_count_phandle_with_args(np, "clocks", "#clock-cells");
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);

const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	struct property *prop;
	const char *clk_name;
	const __be32 *vp;
	u32 pv;
	int rc;
	int count;

	if (index < 0)
		return NULL;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	index = clkspec.args_count ? clkspec.args[0] : 0;
	count = 0;

	/* if there is an indices property, use it to translate the index
	 * specified into an array offset for the clock-output-names property.
	 */
	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
		if (index == pv) {
			index = count;
			break;
		}
		count++;
	}

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  index,
					  &clk_name) < 0)
		clk_name = clkspec.np->name;

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);

struct clock_provider {
	of_clk_init_cb_t clk_init_cb;
	struct device_node *np;
	struct list_head node;
};

static LIST_HEAD(clk_provider_list);
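/*
 * Device tree sketch (hypothetical binding, for illustration only): given the
 * nodes below, of_clk_get_parent_name(consumer_np, 0) above resolves the
 * phandle, maps the specifier value through "clock-indices" when present, and
 * returns the matching "clock-output-names" entry ("bar" here), falling back
 * to the provider node name otherwise.
 *
 *	clkc: clock-controller@1000 {
 *		#clock-cells = <1>;
 *		clock-indices = <1>, <3>;
 *		clock-output-names = "foo", "bar";
 *	};
 *
 *	consumer@2000 {
 *		clocks = <&clkc 3>;
 *	};
 */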
/*
 * This function looks for a parent clock. If there is one, then it
 * checks that the provider for this parent clock was initialized, in
 * which case the parent clock will be ready.
 */
static int parent_ready(struct device_node *np)
{
	int i = 0;

	while (true) {
		struct clk *clk = of_clk_get(np, i);

		/* this parent is ready, we can check the next one */
		if (!IS_ERR(clk)) {
			clk_put(clk);
			i++;
			continue;
		}

		/* at least one parent is not ready, we exit now */
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return 0;

		/*
		 * Here we assume that the device tree is written
		 * correctly, so any other error means that there are
		 * no more parents. As we didn't exit earlier, all
		 * previous parents are ready. If the clock has no
		 * parents at all, there is nothing to wait for and we
		 * consider it ready as well.
		 */
		return 1;
	}
}

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers
 * and calls their initialization functions. It does so while trying
 * to respect the dependencies between providers.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct clock_provider *clk_provider, *next;
	bool is_init_done;
	bool force = false;

	if (!matches)
		matches = &__clk_of_table;

	/* First prepare the list of the clocks providers */
	for_each_matching_node_and_match(np, matches, &match) {
		struct clock_provider *parent =
			kzalloc(sizeof(struct clock_provider), GFP_KERNEL);

		if (!parent) {
			/* allocation failed: drop what we built so far and bail out */
			list_for_each_entry_safe(clk_provider, next,
						 &clk_provider_list, node) {
				list_del(&clk_provider->node);
				kfree(clk_provider);
			}
			return;
		}

		parent->clk_init_cb = match->data;
		parent->np = np;
		list_add_tail(&parent->node, &clk_provider_list);
	}

	while (!list_empty(&clk_provider_list)) {
		is_init_done = false;
		list_for_each_entry_safe(clk_provider, next,
					 &clk_provider_list, node) {
			if (force || parent_ready(clk_provider->np)) {

				clk_provider->clk_init_cb(clk_provider->np);
				of_clk_set_defaults(clk_provider->np, true);

				list_del(&clk_provider->node);
				kfree(clk_provider);
				is_init_done = true;
			}
		}

		/*
		 * We didn't manage to initialize any of the
		 * remaining providers during the last loop, so now we
		 * initialize all the remaining ones unconditionally
		 * in case the clock parent was not mandatory
		 */
		if (!is_init_done)
			force = true;
	}
}
#endif
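/*
 * Usage sketch (hypothetical platform code, for illustration only): clock
 * providers normally hook into of_clk_init() via CLK_OF_DECLARE, and the
 * architecture's early init then calls of_clk_init(NULL) to walk the
 * __clk_of_table built from those declarations. The "foo"/"acme" names are
 * made up.
 *
 *	static void __init foo_clk_init(struct device_node *np)
 *	{
 *		// register clocks, then expose them, e.g.:
 *		// of_clk_add_provider(np, of_clk_src_onecell_get, &foo_clk_data);
 *	}
 *	CLK_OF_DECLARE(foo_clk, "acme,foo-clock", foo_clk_init);
 *
 *	// typically from the machine's time_init() or equivalent:
 *	of_clk_init(NULL);
 */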